A convolutional neural network (Convolutional Neural Network, CNN) is a feed-forward neural network whose artificial neurons respond to surrounding units within a limited region, and it performs especially well on large images.
A CNN usually consists of one or more convolution layers (convolution layer) followed by fully connected layers (full connect layer) at the top; in practice a pooling layer (pooling layer) is usually added after each convolution layer.
The framework used here is TensorFlow, an open-source machine learning platform developed by Google. It implements many of the low-level machine learning operations and offers rich Python interfaces and documentation, while its C, C++, and Java interfaces and documentation are less complete, so Python is the language used for this project.
After several rounds of revision, the model is built as follows: one input layer, two convolution layers, two pooling layers, and two fully connected layers.
* Input layer: the images in the training folder are resized; without resizing, memory would quickly be exhausted. After resizing, the data is normalized.
* Convolution layer: performs the convolution and trains the weights (weight) and biases (bias), which are the "features" in the abstract sense. Unlike earlier shallow learning, deep learning focuses on learning these features, whereas shallow learning takes the opposite approach and relies on hand-crafted models and hand-built features, which requires the modeler to understand the problem deeply.
* Pooling layer: an image has many pixels (pixel), so the resulting tensor (tensor) has very high dimensionality, and we compress it. Pooling exploits the fact that neighboring pixels are similar, so one point can represent its whole region. It reduces the height and width of the tensor but not its depth, as shown in Figure 1 and in the small sketch below.
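For intuition, here is a minimal NumPy sketch (illustrative only, not part of the model code) of 2x2 max pooling on a 4x4 single-channel image; the height and width are halved while the depth would stay unchanged:

import numpy as np
x = np.array([[1, 3, 2, 1],
              [4, 6, 5, 2],
              [7, 2, 9, 3],
              [1, 8, 4, 6]], dtype=np.float32)
# take the maximum of every non-overlapping 2x2 block: 4x4 -> 2x2
pooled = x.reshape(2, 2, 2, 2).max(axis=(1, 3))
print(pooled)
# [[6. 5.]
#  [8. 9.]]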
The image data on disk is organized into a training folder and a test folder:
+---train_data
------cat.1.jpg
------cat.2.jpg
------...
------dog.1.jpg
------dog.2.jpg
------dog.3.jpg
+---test_data
------cat.1.jpg
------cat.2.jpg
------dog.1.jpg
------dog.2.jpg
------dog.3.jpg
import matplotlib.pyplot as plt
import numpy as np
import os
from IPython.display import display, Image, HTML
import cv2
import tensorflow as tf
# training image folder
TRAIN_DIR = 'C:\\Users\\longyiyuan\\Desktop\\train_data\\'
# test image folder
TEST_DIR = 'C:\\Users\\longyiyuan\\Desktop\\test_data\\'
# image size
IMAGE_SIZE = 64
# image depth: 3 for RGB, 1 for grayscale
CHANNELS = 3
pixel_depth = 255.0
# model output file
OUTFILE = '../smalltest.npsave.bin'
TRAINING_AND_VALIDATION_SIZE_DOGS = 10000
TRAINING_AND_VALIDATION_SIZE_CATS = 10000
TRAINING_AND_VALIDATION_SIZE_ALL = 20000
TEST_SIZE_DOGS = 250
TEST_SIZE_CATS = 250
TRAINING_SIZE = 20000
TEST_SIZE_ALL = 500
Step 1: collect the image paths and build the label arrays.
# collect the paths of all images in the folders
train_images = [TRAIN_DIR + i for i in os.listdir(TRAIN_DIR)]
train_dogs = [TRAIN_DIR + i for i in os.listdir(TRAIN_DIR) if 'dog' in i]
train_cats = [TRAIN_DIR + i for i in os.listdir(TRAIN_DIR) if 'cat' in i]
test_dogs = [TEST_DIR + i for i in os.listdir(TEST_DIR) if 'dog' in i]
test_cats = [TEST_DIR + i for i in os.listdir(TEST_DIR) if 'cat' in i]
test_images = [TEST_DIR + i for i in os.listdir(TEST_DIR)]
# combine the cat and dog path lists
train_images = train_dogs[:TRAINING_AND_VALIDATION_SIZE_DOGS] + train_cats[:TRAINING_AND_VALIDATION_SIZE_CATS]
# build the dog/cat label array in the same order
train_labels = np.array((['dogs'] * TRAINING_AND_VALIDATION_SIZE_DOGS) + ['cats'] * TRAINING_AND_VALIDATION_SIZE_CATS)
test_images = test_dogs[:TEST_SIZE_DOGS] + test_cats[:TEST_SIZE_CATS]
test_labels = np.array((['dogs'] * TEST_SIZE_DOGS + ['cats'] * TEST_SIZE_CATS))
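As a quick, optional sanity check (these prints are illustrative and not part of the original script), the path lists and label arrays should line up as follows:

print(len(train_images), len(train_labels))   # expected: 20000 20000
print(len(test_images), len(test_labels))     # expected: 500 500
print(train_labels[:3])                       # dogs come first, then cats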
Step 2: read and resize the images.
# read an image, resize it so the longer side is IMAGE_SIZE, then pad it to a square
def read_image(file_path):
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)  # cv2.IMREAD_GRAYSCALE for grayscale
    if img.shape[0] >= img.shape[1]:  # height is greater than width
        resizeto = (IMAGE_SIZE, int(round(IMAGE_SIZE * (float(img.shape[1]) / img.shape[0]))))
    else:
        resizeto = (int(round(IMAGE_SIZE * (float(img.shape[0]) / img.shape[1]))), IMAGE_SIZE)
    img2 = cv2.resize(img, (resizeto[1], resizeto[0]), interpolation=cv2.INTER_CUBIC)
    img3 = cv2.copyMakeBorder(img2, 0, IMAGE_SIZE - img2.shape[0], 0, IMAGE_SIZE - img2.shape[1],
                              cv2.BORDER_CONSTANT, 0)
    return img3[:, :, ::-1]  # turn BGR into RGB format
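To verify the helper (assuming at least one image exists in TRAIN_DIR), read a single file and check that the padded result is always 64 x 64 x 3:

sample = read_image(train_images[0])
print(sample.shape)   # expected: (64, 64, 3)
print(sample.dtype)   # uint8; normalization happens in the next step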
Step 3: normalize the images.
# normalize the images and pack them into a single array
def prep_data(images):
    count = len(images)
    # array of shape [count, IMAGE_SIZE, IMAGE_SIZE, CHANNELS]; CHANNELS is the image depth
    data = np.ndarray((count, IMAGE_SIZE, IMAGE_SIZE, CHANNELS), dtype=np.float32)
    for i, image_file in enumerate(images):
        img = read_image(image_file)
        # turn the image data into a float array
        image_data = np.array(img, dtype=np.float32)
        # normalize each of the three RGB channels
        image_data[:, :, 0] = (image_data[:, :, 0].astype(float) - pixel_depth / 2) / pixel_depth
        image_data[:, :, 1] = (image_data[:, :, 1].astype(float) - pixel_depth / 2) / pixel_depth
        image_data[:, :, 2] = (image_data[:, :, 2].astype(float) - pixel_depth / 2) / pixel_depth
        data[i] = image_data
        if i % 250 == 0:
            print("Processed {} of {}".format(i, count))
    return data
train_normalized = prep_data(train_images)
test_normalized = prep_data(test_images)
print("Train shape: {}".format(train_normalized.shape))
print("Test shape: {}".format(test_normalized.shape))
The normal output is:
Train shape: (20000, 64, 64, 3)
Test shape: (500, 64, 64, 3)
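Since each channel is shifted by pixel_depth / 2 and then divided by pixel_depth, every normalized value should fall in [-0.5, 0.5]; a quick illustrative check:

print(train_normalized.min(), train_normalized.max())   # both should lie within [-0.5, 0.5]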
np.random.seed(133)
def randomize(dataset, labels):
    permutation = np.random.permutation(labels.shape[0])
    shuffled_dataset = dataset[permutation, :, :, :]
    shuffled_labels = labels[permutation]
    return shuffled_dataset, shuffled_labels
train_dataset_rand, train_labels_rand = randomize(train_normalized, train_labels)
test_dataset, test_labels = randomize(test_normalized, test_labels)
# no separate validation split is used here, so take the full shuffled training set
train_dataset = train_dataset_rand[:TRAINING_SIZE, :, :, :]
train_labels = train_labels_rand[:TRAINING_SIZE]
test_dataset = test_dataset[:TEST_SIZE_ALL, :, :, :]
test_labels = test_labels[:TEST_SIZE_ALL]
print('Training', train_dataset.shape, train_labels.shape)
print('Test', test_dataset.shape, test_labels.shape)
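With the sizes defined above, the two prints should report shapes along the following lines (assuming the fixes noted earlier):

# Training (20000, 64, 64, 3) (20000,)
# Test (500, 64, 64, 3) (500,)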
Step 1: define the constants.
# image size
image_size = IMAGE_SIZE # TODO: redundant, consolidate
# number of labels, i.e. the number of classes; here there are only cats and dogs
num_labels = 2
# image depth: an RGB image has three channels
num_channels = 3 # rgb
Step 2: turn the data and labels into tensors.
def reformat(dataset, labels):
    dataset = dataset.reshape(
        (-1, image_size, image_size, num_channels)).astype(np.float32)
    labels = (labels == 'cats').astype(np.float32)  # set dogs to 0 and cats to 1
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
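The one-hot trick on the line labels = (np.arange(num_labels) == labels[:, None]) can be hard to read; a minimal sketch of what it produces (the toy labels below are made up for illustration):

import numpy as np
labels = np.array(['dogs', 'dogs', 'cats'])
binary = (labels == 'cats').astype(np.float32)                  # dogs -> 0.0, cats -> 1.0
one_hot = (np.arange(2) == binary[:, None]).astype(np.float32)  # broadcast into one-hot rows
print(one_hot)
# [[1. 0.]
#  [1. 0.]
#  [0. 1.]]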
Step 3: define the variables and helper functions.
xs = tf.placeholder(tf.float32, [None, 64, 64, 3])
ys = tf.placeholder(tf.float32, [None, 2])
# keep probability: dropout discards some activations to prevent overfitting
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 64, 64, 3])
# define the weight_variable function
def Weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# define the bias_variable function
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# define the convolution function
def con2d(inputs, weight):
    return tf.nn.conv2d(inputs, weight, strides=[1, 1, 1, 1], padding='SAME')
# define the pooling function
def max_pooling_2x2(inputs):
    return tf.nn.max_pool(inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# define the accuracy function
def computer_accuracy(v_xs, v_ys):
    global prediction
    y_re = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    # 500 is the size of the test set; remember to change it if the test set size changes
    return (np.sum(np.argmax(y_re, 1) == np.argmax(v_ys, 1)) / 500)
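As an aside, hard-coding 500 ties the helper to the test-set size; dividing by v_ys.shape[0] would avoid that. The argmax-based accuracy itself works like this toy sketch (the arrays are made up for illustration):

import numpy as np
y_pred = np.array([[0.9, 0.1], [0.3, 0.7], [0.6, 0.4], [0.2, 0.8]])  # softmax-style outputs
y_true = np.array([[1, 0], [0, 1], [0, 1], [0, 1]])                  # one-hot labels
correct = np.argmax(y_pred, 1) == np.argmax(y_true, 1)
print(correct.mean())   # 0.75 -- three of the four predictions match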
Step 4: build the model.
# The input depth is 3. Each convolution uses stride 1, while each pooling step uses stride 2, so the width and height are halved after every pooling layer.
#the first convolution
W_conv1 = Weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(con2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pooling_2x2(h_conv1)  # height/width = 32 x 32
#the second convolution
W_conv2 = Weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(con2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pooling_2x2(h_conv2)  # height/width = 16 x 16
#the first full connection layer
w_f1 = Weight_variable([16*16*64, 4096])
b_f1 = bias_variable([4096])
h_pool2_flat = tf.reshape(h_pool2, [-1, 16*16*64])
h_f1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_f1) + b_f1)
h_f1_drop = tf.nn.dropout(h_f1, keep_prob)
w_f2 = Weight_variable([4096, 2])
b_f2 = bias_variable([2])
prediction = tf.nn.softmax(tf.matmul(h_f1_drop, w_f2) + b_f2)
#the loss between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
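To double-check the 16*16*64 size used by the first fully connected layer, here is a small sketch that traces the spatial size through the two conv+pool stages (assuming the 'SAME' padding and 2x2 pooling defined above):

size = 64                # the input images are 64 x 64 x 3
for depth in (32, 64):   # output depths of the two convolution stages
    # 'SAME' convolution with stride 1 keeps the spatial size;
    # 2x2 max pooling with stride 2 halves it
    size = size // 2
    print("after pooling: {0} x {0} x {1}".format(size, depth))
# after pooling: 32 x 32 x 32
# after pooling: 16 x 16 x 64
print("flattened length:", size * size * 64)   # 16384 = 16 * 16 * 64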
Step 5: launch the graph and start training.
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# 20000 training steps
for i in range(20000):
    # each batch contains 1000 samples
    offset = (i * 1000) % (train_labels.shape[0] - 1000)
    batch_xs = train_dataset[offset:(offset + 1000), :, :, :]
    batch_ys = train_labels[offset:(offset + 1000), :]
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.8})
    # compute the accuracy on the test set
    val = computer_accuracy(test_dataset, test_labels)
    print("Train step %d:" % i, val)
This model only reaches about 70% accuracy, and the various hyperparameters can still be tuned. If you are new to this and not very familiar with TensorFlow, it is worth reading the official TensorFlow documentation first. TensorFlow is a framework that has to be installed; the CPU version can be installed with pip install tensorflow. The cat and dog data can be downloaded from the Kaggle Dogs vs. Cats competition.