Tensorflow框架搭建卷积神经网络进行五种花的分类
数据集:
五种花:daisy、dandelion、roses、sunflowers、tulips
数据集下载:http://download.tensorflow.org/example_images/flower_photos.tgz
读取并随机打乱数据集: read_img.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 09:23:46 2019

@author: ZQQ

Read the five-class flower dataset from disk and split it into
shuffled training / validation sets.
"""
import numpy as np
import os
import glob
from skimage import transform, io
import random

# Target size: every image is resized to w x h before training.
w = 100
h = 100


def read_img(path):
    """Read every .jpg under each class sub-directory of ``path``.

    Returns ``(images, labels)``: a float32 array of the resized images
    and an int32 array of class indices (one index per sub-directory,
    in ``os.listdir`` order).
    """
    # One entry per class: the immediate sub-directories of `path`.
    data_list = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []    # resized images
    labels = []  # class index of each image
    for idx, folder in enumerate(data_list):
        for im in glob.glob(folder + '/*.jpg'):
            print('reading the images:%s' % (im))
            img = io.imread(im)
            # Normalize every image to the same w x h size.
            img = transform.resize(img, (w, h))
            # Log "<folder><file>_<label>", one record per line.
            # BUG FIX: the original wrote a literal letter 'n' instead of
            # a newline escape, so all records ended up on one line.
            with open('datasets_name.txt', 'a') as f:
                f.write(folder + im + '_' + str(idx) + '\n')
            imgs.append(img)
            labels.append(idx)
    # np.asarray (unlike np.array) avoids a copy when the input is
    # already an ndarray of the requested dtype.
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)


def shuffle_data(data, label):
    """Shuffle ``data``/``label`` in unison, then split 80/20.

    Returns ``(x_train, y_train, x_val, y_val)``.
    """
    data_size = data.shape[0]
    # Shuffle one index array and apply it to both arrays so that the
    # example/label pairing is preserved.
    arr = np.arange(data_size)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]

    ratio = 0.8  # fraction of examples used for training
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    num = int(len(data) * ratio)
    x_train = data[:num]
    y_train = label[:num]
    x_val = data[num:]
    y_val = label[num:]
    return x_train, y_train, x_val, y_val

#path = 'flower_photos/'  # root directory holding one sub-directory per class
#data, label = read_img(path)
搭建卷积神经网络:cnn.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 16:38:15 2019

@author: ZQQ

tools: Pycharm

A small convolutional network for the 5-class flower dataset, built with
the TensorFlow 1.x layers API.

Reference:
https://www.cnblogs.com/ansang/p/9164805.html
Data: http://download.tensorflow.org/example_images/flower_photos.tgz
"""
import tensorflow as tf

# Batch normalization (currently disabled): mitigates vanishing/exploding
# gradients and speeds up convergence.
# def batch_norm(x, momentum=0.9, epsilon=1e-5, train=True, name='bn'):
#     return tf.layers.batch_normalization(x,
#                                          momentum=momentum,
#                                          epsilon=epsilon,
#                                          scale=True,
#                                          training=train,
#                                          name=name)


def simple_net(x):
    """Four conv/pool stages, two dense layers, and a 5-way logits layer.

    Input: 100x100 image batch; returns raw (pre-softmax) logits.
    """

    def conv_pool(inputs, filters, kernel):
        # One conv ("same" padding keeps H/W, ReLU) + 2x2 max-pool,
        # which halves the spatial size.
        conv = tf.layers.conv2d(
            inputs=inputs,
            filters=filters,
            kernel_size=kernel,
            padding="same",
            activation=tf.nn.relu,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
        return tf.layers.max_pooling2d(inputs=conv,
                                       pool_size=[2, 2],
                                       strides=2)

    def dense(inputs, units, activation):
        # Fully connected layer sharing the init / L2 settings.
        return tf.layers.dense(
            inputs=inputs,
            units=units,
            activation=activation,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

    # Spatial size per stage: 100 -> 50 -> 25 -> 12 -> 6.
    net = conv_pool(x, 32, [5, 5])     # [3,3] was also tried for comparison
    net = conv_pool(net, 64, [5, 5])
    net = conv_pool(net, 128, [3, 3])
    net = conv_pool(net, 128, [3, 3])

    # Flatten the final 6x6x128 feature map for the dense layers.
    flat = tf.reshape(net, [-1, 6 * 6 * 128])
    # Dropout (disabled) would go here to reduce overfitting:
    # flat = tf.layers.dropout(inputs=flat, rate=0.5)

    fc1 = dense(flat, 1024, tf.nn.relu)
    fc2 = dense(fc1, 512, tf.nn.relu)
    logits = dense(fc2, 5, None)  # 5 flower classes

    # pred = tf.nn.softmax(logits, name='prob')
    # return logits, pred
    return logits

# Four conv layers, two dense layers, softmax applied by the loss/caller.
# Adding batch normalization after each conv (relu, pool) would further
# stabilize training and speed up convergence.
定义一个批量取数据的函数:batch_get_data.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 21:23:56 2019

@author: ZQQ
"""
import numpy as np


def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield (inputs, targets) mini-batches of exactly ``batch_size`` items.

    A trailing remainder smaller than ``batch_size`` is dropped.  When
    ``shuffle`` is true, examples are drawn in a random order.
    """
    n = len(inputs)
    assert n == len(targets)
    order = None
    if shuffle:
        order = np.arange(n)
        np.random.shuffle(order)
    for start in range(0, n - batch_size + 1, batch_size):
        if order is None:
            sel = slice(start, start + batch_size)  # contiguous slice
        else:
            sel = order[start:start + batch_size]   # fancy-index selection
        yield inputs[sel], targets[sel]
实验结果:
进行100次epoch
总结:val loss 来看,有点过拟合,精度没有得到大的提高,后期加入dropout再进一步查看实验效果。
还可以进行批量标准化,加速收敛,并且可以防止梯度消失或者梯度爆炸。
以及加入softmax处理等。
独立测试:test_independent.py
训练得到模型保存后,加载,测试输入的图片
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 14:57:15 2019

@author: ZQQ

Load the trained checkpoint and classify a single flower image.

Reference:
https://www.cnblogs.com/ansang/p/9164805.html
"""
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from models import cnn
import matplotlib.pyplot as plt

# All images are resized to 100x100 RGB before inference.
w = 100
h = 100
c = 3

classes = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

# Display the test image, then resize it to the network's input size.
image_test = Image.open('flowers_photos_independent/test1.jpg')
plt.imshow(image_test)
plt.show()
resized_image = image_test.resize((w, h), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')

# Build the inference graph: a single-image placeholder fed through the CNN.
imgs_holder = tf.placeholder(tf.float32, shape=[1, w, h, c])
logits = cnn.simple_net(imgs_holder)

saver = tf.train.Saver()
ckpt_dir = 'result/model_save/'

with tf.Session() as sess:
    # Restore the most recent checkpoint from the training run.
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)
    classes_ = sess.run(
        logits,
        feed_dict={imgs_holder: np.reshape(image_data, [1, w, h, c])})

num = np.argmax(classes_)
# NOTE(review): classes_ holds raw logits (simple_net applies no softmax),
# so the value printed as "Probability" is an unnormalized score.
print('class is :', classes[int(num)], ' Probability is :', classes_[0][int(num)])
项目整体结构:
代码后期,将移植到GitHub上,敬请期待。
坚持,感恩社会,努力,加油!
注:还可以用几种比较典型的卷积神经网络进行实现,比如lenet、alexnet、vggnet等等。
源码:
https://github.com/AugustMe/TensorFlow-Learn/tree/master/FlowersImageClassify
相关知识
“花朵分类“ 手把手搭建【卷积神经网络】
基于卷积神经网络的农作物病虫害识别系统
深度学习之基于Tensorflow卷积神经网络花卉识别系统
基于卷积神经网络的花卉图像分类研究(32页)
【花卉识别系统】Python+卷积神经网络算法+人工智能+深度学习+图像识别+算法模型
Tensorflow学习笔记——搭建神经网络
TensorFlow学习记录(八)
花朵识别系统Python+卷积神经网络算法+人工智能+深度学习+计算机课设项目+TensorFlow+模型训练
神经网络与深度学习
基于卷积神经网络的花卉识别方法
网址: Tensorflow框架搭建卷积神经网络进行五种花的分类 https://www.huajiangbk.com/newsview772671.html
上一篇: 雏菊、蒲公英、玫瑰、向日葵、郁金 |
下一篇: 兰花叶子卷起来了,怎么办?(教你 |
推荐分享

- 1君子兰什么品种最名贵 十大名 4012
- 2世界上最名贵的10种兰花图片 3364
- 3花圈挽联怎么写? 3286
- 4迷信说家里不能放假花 家里摆 1878
- 5香山红叶什么时候红 1493
- 6花的意思,花的解释,花的拼音 1210
- 7教师节送什么花最合适 1167
- 8勿忘我花图片 1103
- 9橄榄枝的象征意义 1093
- 10洛阳的市花 1039