首页 > 分享 > tensorflow: 花卉分类

tensorflow: 花卉分类

 本文主要通过CNN进行花卉的分类,训练结束保存模型,最后通过调用模型,输入花卉的图片通过模型来进行类别的预测。

       测试平台:win 10+tensorflow 1.2

       数据集:http://download.tensorflow.org/example_images/flower_photos.tgz

       数据集中总共有五种花,分别放在五个文件夹下。

       一、CNN训练模型

       模型尺寸分析:卷积层全都采用了补0,所以经过卷积层长和宽不变,只有深度加深。池化层全都没有补0,所以经过池化层长和宽均减小,深度不变。

       模型尺寸变化:100×100×3->100×100×32->50×50×32->50×50×64->25×25×64->25×25×128->12×12×128->12×12×128->6×6×128

       CNN训练代码如下:

from skimage import io,transform

import glob

import os

import tensorflow as tf

import numpy as np

import time

# Root folder of the flower_photos dataset (one subfolder per class).
path='E:/data/datasets/flower_photos/'

# Checkpoint prefix where the trained model is saved.
model_path='E:/data/model/flower/model.ckpt'

# Network input size: width / height / channels of every image.
w=100

h=100

c=3

def read_img(path):
    """Load every *.jpg under each class subfolder of ``path``.

    Each immediate subdirectory of ``path`` is treated as one class;
    its enumeration index becomes the integer label.

    Returns:
        (images, labels): images resized to (w, h) as a float32 array,
        labels as an int32 array of subfolder indices.
    """
    class_dirs = [path + entry for entry in os.listdir(path)
                  if os.path.isdir(path + entry)]
    images = []
    labels = []
    for class_idx, class_dir in enumerate(class_dirs):
        for img_file in glob.glob(class_dir + '/*.jpg'):
            print('reading the images:%s' % (img_file))
            picture = io.imread(img_file)
            picture = transform.resize(picture, (w, h))
            images.append(picture)
            labels.append(class_idx)
    return np.asarray(images, np.float32), np.asarray(labels, np.int32)

# Load the whole dataset, shuffle it, and split 80/20 into train/validation.
data, label = read_img(path)

num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

ratio = 0.8
# Bug fix: np.int was deprecated in NumPy 1.20 and later removed;
# the builtin int is the correct replacement and behaves identically here.
s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]

# Graph inputs: image batches and their integer class labels. Named so
# the prediction script can look up "x:0" in the restored graph.
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

def inference(input_tensor, train, regularizer):
    """Forward pass of the 4-conv / 3-FC flower-classification CNN.

    Args:
        input_tensor: float32 images, shape [batch, 100, 100, 3].
        train: Python bool; when True, dropout (keep_prob=0.5) is applied
            after fc1 and fc2.
        regularizer: callable or None; when given, its value on each FC
            weight matrix is added to the 'losses' collection so the
            caller can fold it into the total loss.

    Returns:
        Unnormalized 5-class logits, shape [batch, 5].

    Spatial sizes: conv layers use SAME padding (size preserved), pools
    use 2x2/stride-2 VALID (size halved):
    100 -> 50 -> 25 -> 12 -> 6, giving a 6*6*128 flattened feature.
    """
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")

    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [5, 5, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 128, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))

    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten the final 6x6x128 feature map for the fully-connected head.
    nodes = 6 * 6 * 128
    reshaped = tf.reshape(pool4, [-1, nodes])

    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)

    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases

    return logit

# L2 penalty applied to the FC weights; inference() pushes each penalty
# term into the 'losses' collection.
regularizer = tf.contrib.layers.l2_regularizer(0.0001)

# NOTE(review): train=False disables dropout even during training; the
# single graph is reused for evaluation/serving, so enabling it here
# would also affect logits_eval — left as-is, but worth revisiting.
logits = inference(x, False, regularizer)

# Re-export the logits under a stable tensor name so the prediction
# script can fetch "logits_eval:0" from the restored graph.
b = tf.constant(value=1, dtype=tf.float32)
logits_eval = tf.multiply(logits, b, name='logits_eval')

# Bug fix: the original minimized the raw per-example cross-entropy
# vector and never consumed the 'losses' collection it built. Reduce the
# cross-entropy to a scalar and add the collected L2 penalties.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
) + tf.add_n(tf.get_collection('losses'))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# Fraction of predictions whose argmax matches the integer label.
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield successive (inputs, targets) pairs of ``batch_size`` rows.

    Trailing examples that do not fill a complete batch are dropped.
    With ``shuffle=True`` the order is randomized via a permutation of
    indices; otherwise contiguous slices are yielded in order.
    """
    assert len(inputs) == len(targets)
    perm = None
    if shuffle:
        perm = np.arange(len(inputs))
        np.random.shuffle(perm)
    total = len(inputs)
    for start in range(0, total - batch_size + 1, batch_size):
        stop = start + batch_size
        picks = perm[start:stop] if shuffle else slice(start, stop)
        yield inputs[picks], targets[picks]

# Training hyperparameters.
n_epoch = 10
batch_size = 64

saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    # One optimization pass over the training split.
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err
        train_acc += ac
        n_batch += 1
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))

    # One evaluation pass over the held-out split (no weight updates).
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err
        val_acc += ac
        n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))

# Persist the trained weights, then release the session.
saver.save(sess, model_path)
sess.close()

 

二、调用模型进行预测

       调用模型进行花卉的预测,代码如下:

from skimage import io,transform

import tensorflow as tf

import numpy as np

# Sample image paths, one per class (order matches the label indices
# produced by read_img, i.e. sorted subfolder order).
path1 = "E:/data/datasets/flower_photos/daisy/5547758_eea9edfd54_n.jpg"
path2 = "E:/data/datasets/flower_photos/dandelion/7355522_b66e5d3078_m.jpg"
path3 = "E:/data/datasets/flower_photos/roses/394990940_7af082cf8d_n.jpg"
path4 = "E:/data/datasets/flower_photos/sunflowers/6953297_8576bf4ea3.jpg"
path5 = "E:/data/datasets/flower_photos/tulips/10791227_7168491604.jpg"

# Bug fix: label 0 was misspelled 'dasiy' in the original.
flower_dict = {0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}

# Must match the training-time network input size.
w = 100
h = 100
c = 3

def read_one_image(path):
    """Read a single image from disk, resized to the (w, h) network input."""
    resized = transform.resize(io.imread(path), (w, h))
    return np.asarray(resized)

with tf.Session() as sess:
    # Load the five sample images, one per class.
    sample_paths = [path1, path2, path3, path4, path5]
    data = [read_one_image(p) for p in sample_paths]

    # Restore the graph structure and trained weights saved by the
    # training script.
    saver = tf.train.import_meta_graph('E:/data/model/flower/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('E:/data/model/flower/'))

    # Look up the input placeholder and the exported logits by name.
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    logits = graph.get_tensor_by_name("logits_eval:0")

    feed_dict = {x: data}
    classification_result = sess.run(logits, feed_dict)

    print(classification_result)
    print(tf.argmax(classification_result, 1).eval())

    # Map each predicted class index to its flower name.
    output = tf.argmax(classification_result, 1).eval()
    for i in range(len(output)):
        print("第", i + 1, "朵花预测:" + flower_dict[output[i]])

本文的模型对于花卉的分类准确率大概在70%左右,采用迁移学习调用Inception-v3模型对本文中的花卉数据集分类准确率在95%左右。主要的原因在于本文的CNN模型较为简单,而且花卉数据集本身就比mnist手写数字数据集分类难度要大一点,同样的模型在mnist手写数字的识别上准确率要比花卉数据集准确率高不少。

       本文的CNN模型完全可以通过增大模型复杂度或者改参数调试以及对图像进行预处理来提高准确率,但本文只是想记录一下最近的学习,这已经足够了。

       参考博客:http://www.cnblogs.com/denny402/p/6931338.html


相关知识

基于TensorFlow的CNN卷积网络模型花卉分类(1)
TensorFlow入门
TensorFlow学习记录(八)
TensorFlow 2建立神经网络分类模型——以iris数据为例
花卉识别(tensorflow)
基于tensorflow的花卉识别
深度学习之基于Tensorflow卷积神经网络花卉识别系统
Tensorflow鸢尾花分类(数据加载与特征处理)
TensorFlow机器学习实战指南——山鸢尾花分类
colab cnn实现花卉图片分类识别

网址: tensorflow: 花卉分类 https://m.huajiangbk.com/newsview171827.html

所属分类:花卉
上一篇: 常见的花卉类型,你能区分开吗?
下一篇: 花茶/花草茶的功效与作用