The previous section visualized curves of the trained network's weights, biases, and other statistics; this section visualizes the network's training process in TensorBoard.
Code:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector

# Load the dataset
mnist = input_data.read_data_sets("../MNIST_DATA", one_hot=True)

max_steps = 1001    # number of training steps
image_num = 3000    # number of images shown in the projector
DIR = 'E:/py_code'  # base directory

sess = tf.Session()  # create the session

# Load the images (stack the first 3000 test images into one matrix)
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable=False, name='embedding')

# Summary helper: record statistics of var so they show up in TensorBoard
def variable_summaries(var):
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)  # mean
        tf.summary.scalar("mean", mean)
        with tf.name_scope("stddev"):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar("stddev", stddev)           # standard deviation
        tf.summary.scalar("max", tf.reduce_max(var))  # maximum
        tf.summary.scalar("min", tf.reduce_min(var))  # minimum
        tf.summary.histogram("histogram", var)        # histogram
# Name scopes
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x_input')
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')

# Show 10 of the input images in TensorBoard
with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 10)

# Build a simple neural network
with tf.name_scope('layer'):
    with tf.name_scope('weights'):
        w = tf.Variable(tf.random_normal([784, 10]), name='W')
        variable_summaries(w)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]), name='b')
        variable_summaries(b)
    with tf.name_scope('Wx_plus_b'):
        # linear part
        Wx_plus_b = tf.matmul(x, w) + b
    with tf.name_scope('softmax'):
        # softmax activation makes the output non-linear
        y_pred = tf.nn.softmax(Wx_plus_b)

# Cross-entropy cost function
# (note: y_pred has already been through softmax; strictly,
# softmax_cross_entropy_with_logits_v2 expects the raw logits Wx_plus_b)
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_pred))
    tf.summary.scalar('loss', loss)

# Accuracy (argmax returns the index of the largest value in a 1-D tensor)
with tf.name_scope('accuracy'):
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1)), tf.float32))
    tf.summary.scalar('accuracy', accuracy)

# Train with gradient descent to minimize the loss (the optimizer can be tuned)
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize variables
sess.run(tf.global_variables_initializer())
# Generate the metadata file (one label per line for each projected image)
if tf.gfile.Exists(DIR + '/projector/projector/metadata.tsv'):
    tf.gfile.DeleteRecursively(DIR + '/projector/projector/metadata.tsv')
with open(DIR + '/projector/projector/metadata.tsv', 'w') as f:
    labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
    for i in range(image_num):
        f.write(str(labels[i]) + '\n')

merged = tf.summary.merge_all()
# Writer that records the summaries and the graph
projector_writer = tf.summary.FileWriter(DIR + '/projector/projector', sess.graph)
# writer = tf.summary.FileWriter(DIR + '/projector/projector/logs', sess.graph)

saver = tf.train.Saver()  # saver for the model checkpoint

# Projector configuration
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = embedding.name
embed.metadata_path = DIR + '/projector/projector/metadata.tsv'
embed.sprite.image_path = DIR + '/mnist_10k_sprite.png'
embed.sprite.single_image_dim.extend([28, 28])
projector.visualize_embeddings(projector_writer, config)

# Start training
for i in range(max_steps):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    summary, _ = sess.run([merged, train_step],
                          feed_dict={x: batch_xs, y: batch_ys},
                          options=run_options, run_metadata=run_metadata)
    projector_writer.add_run_metadata(run_metadata, 'step%03d' % i)
    projector_writer.add_summary(summary, i)
    if i % 100 == 0:  # print the test accuracy every 100 steps
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('Iter ' + str(i) + ', accuracy ' + str(acc))

saver.save(sess, DIR + '/projector/projector/a_model.ckpt', global_step=max_steps)
projector_writer.close()
sess.close()
1. To use this code, first change this base path to match your machine:
DIR = 'E:/py_code'  # base directory
2. Then create the two-level projector subdirectory (i.e. DIR/projector/projector) under that path; otherwise the script fails with a path-not-found error. The online course says the directory is created automatically, but that did not work for me and I did not dig into why. (A small sketch for creating it in code follows this list.)
3. Change the sprite image path to point at your copy:
embed.sprite.image_path = DIR + '/mnist_10k_sprite.png'
I simply put it directly under py_code; the image is about 1.5 MB. Images uploaded to CSDN get watermarked, so I have put it in CSDN's resources instead.
Download link: training sprite image download
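If it helps, here is a minimal sketch (my own addition, assuming Python 3 and the standard-library os module) that creates the directory layout used above before the script runs:

import os

DIR = 'E:/py_code'  # same base directory as in the script
# create DIR/projector/projector; no error if it already exists
os.makedirs(os.path.join(DIR, 'projector', 'projector'), exist_ok=True)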
1. After the program has run, you can see the generated files under the projector subdirectory.
2. Press Win + R to open the Run dialog, type cmd and press Enter to open a shell.
3. Switch to the drive the files are on:
e:
4. Run TensorBoard for visualization, pointing --logdir at the directory the FileWriter wrote to:
tensorboard --logdir=E:\py_code\projector\projector
5. Copy the URL printed at the bottom, e.g. http://DESKTOP-V48600Q:6006, and open it in a browser; http://localhost:6006 works as well.
6. Switch to the EMBEDDINGS tab.
7. Select T-SNE.
8. Click Re-run in the middle of the left panel to re-run the projection; set the "Color by" field to "label (10 colors)" to color the points by digit.
9. The result after 400 training steps:
10. Switching the optimizer (and enlarging the network) improves the accuracy:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector

# Load the dataset
mnist = input_data.read_data_sets("../MNIST_DATA", one_hot=True)

max_steps = 1001    # number of training steps
image_num = 3000    # number of images shown in the projector
DIR = 'E:/py_code'  # base directory

sess = tf.Session()  # create the session

# Load the images (stack the first 3000 test images into one matrix)
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable=False, name='embedding')

# Input placeholders
x = tf.placeholder(tf.float32, [None, 784], name='x_input')
y = tf.placeholder(tf.float32, [None, 10], name='y_input')
# keep_prob: fraction of neurons to keep for dropout
keep_prob = tf.placeholder(tf.float32)
# Learning rate
lr = tf.Variable(0.001, dtype=tf.float32)

# Show 10 of the input images in TensorBoard
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', image_shaped_input, 10)

# Build a neural network with two hidden layers
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.relu(tf.matmul(x, W1) + b1)

W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W4 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b4 = tf.Variable(tf.zeros([10]) + 0.1)
y_pred = tf.nn.softmax(tf.matmul(L2, W4) + b4)

# Cross-entropy cost function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_pred))

# Accuracy (argmax returns the index of the largest value in a 1-D tensor)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1)), tf.float32))

# Train with the Adam optimizer to minimize the loss
train_step = tf.train.AdamOptimizer(lr).minimize(loss)

# Initialize variables
sess.run(tf.global_variables_initializer())
# Generate the metadata file (one label per line for each projected image)
if tf.gfile.Exists(DIR + '/projector/projector/metadata.tsv'):
    tf.gfile.DeleteRecursively(DIR + '/projector/projector/metadata.tsv')
with open(DIR + '/projector/projector/metadata.tsv', 'w') as f:
    labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
    for i in range(image_num):
        f.write(str(labels[i]) + '\n')

# Writer that records the graph for TensorBoard
projector_writer = tf.summary.FileWriter(DIR + '/projector/projector', sess.graph)
# writer = tf.summary.FileWriter(DIR + '/projector/projector/logs', sess.graph)

saver = tf.train.Saver()  # saver for the model checkpoint

# Projector configuration
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = embedding.name
embed.metadata_path = DIR + '/projector/projector/metadata.tsv'
embed.sprite.image_path = DIR + '/mnist_10k_sprite.png'
embed.sprite.single_image_dim.extend([28, 28])
projector.visualize_embeddings(projector_writer, config)

# Start training
for i in range(max_steps):
    # decay the learning rate every step
    sess.run(tf.assign(lr, 0.001 * (0.95 ** i)))
    batch_xs, batch_ys = mnist.train.next_batch(100)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys},
             options=run_options, run_metadata=run_metadata)
    projector_writer.add_run_metadata(run_metadata, 'step%03d' % i)
    if i % 100 == 0:  # print the test accuracy every 100 steps
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('Iter ' + str(i) + ', accuracy ' + str(acc))

saver.save(sess, DIR + '/projector/projector/a_model.ckpt', global_step=max_steps)
projector_writer.close()
sess.close()
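One more observation on the code above: the keep_prob placeholder is defined but never used, so dropout is not actually applied. A minimal sketch of how it could be wired in, if desired (my own addition, not part of the original code):

# hidden layers with dropout (sketch; replaces the plain L1/L2 definitions above)
L1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x, W1) + b1), keep_prob)
L2 = tf.nn.dropout(tf.nn.relu(tf.matmul(L1, W2) + b2), keep_prob)

# during training, keep e.g. 70% of the neurons:
# sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
# and use all of them when evaluating accuracy:
# sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})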
Note that before running the code again you need to delete all of the previously generated files under DIR/projector/projector, otherwise the run fails with an error.
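A minimal sketch for doing that cleanup in code (my own addition, assuming the standard-library os and shutil modules and the same DIR layout as above):

import os, shutil

DIR = 'E:/py_code'
log_dir = os.path.join(DIR, 'projector', 'projector')
# remove the old checkpoint, event, and metadata files, then recreate the empty directory
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)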