"""
MNIST "hello world" #2: handwritten-digit recognition with softmax regression.
Two sessions: the first (commented out below) trains the model, the second
restores the saved checkpoint and runs validation/prediction.

Python:     3.5.2 | Anaconda 4.2.0 (64-bit) | (default, Jul 5 2016, 11:41:13)
            [MSC v.1900 64 bit (AMD64)] on win32
TensorFlow: 1.13.1
IDE:        PyCharm
---------------------
Author:  華北月下老人
Source:  CSDN — https://blog.csdn.net/qwer7512090/article/details/88429625
License: original post by the author; include a link to the post when reposting.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import pylab
mnist = input_data.read_data_sets("MNIST_DATA/",one_hot=True)
# Downloads (on first run) and loads MNIST into MNIST_DATA/; one_hot=True
# yields 10-way one-hot label vectors instead of integer class ids.
# print("train images",mnist.train.images)
# print(mnist.train.images.shape)
# import pylab
# im = mnist.train.images[1]
# im = im.reshape(-1,28)
# pylab.imshow(im)
# pylab.show()
#
# print("test set shape",mnist.test.images.shape)
# print("validation set shape",mnist.validation.images.shape)
tf.reset_default_graph()
# Placeholders: None in the batch dimension accepts any batch size.
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 grayscale images
y = tf.placeholder(tf.float32, [None, 10])    # one-hot digit labels
# Parameters of a single-layer softmax classifier.
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Softmax regression: per-class probabilities for each input image.
pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy loss.
# BUG FIX: the original reduce_sum had no axis, summing over BOTH the batch
# and class axes, so reduce_mean averaged a scalar (a no-op) and the loss
# scaled with batch size. Sum over the class axis only (axis=1), then average
# over the batch — the canonical per-example cross-entropy.
# NOTE(review): tf.log(pred) is NaN if a probability underflows to 0; consider
# tf.clip_by_value or tf.nn.softmax_cross_entropy_with_logits_v2 on the logits.
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))
# Hyperparameters.
learning_rate = 0.01
# Plain gradient descent minimizing the cross-entropy.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
training_epochs = 50
batch_size = 100
display_step = 1   # print the loss every `display_step` epochs
saver = tf.train.Saver()
# NOTE(review): "nmist" looks like a typo for "mnist", but the path is kept
# unchanged so an existing checkpoint saved under this name still restores.
model_path = "log/mnist/nmist_model.ckpt"
# Launch the training session (commented out — the checkpoint restored below
# was produced by this code; uncomment to retrain):
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())  # initialization op
#     # start training
#     for epoch in range(training_epochs):
#         avg_cost = 0
#         total_batch = int(mnist.train.num_examples/batch_size)
#         # one pass over the whole training set
#         for i in range(total_batch):
#             batch_xs,batch_ys = mnist.train.next_batch(batch_size)
#             # run the optimizer
#             _,c = sess.run([optimizer,cost],feed_dict={x:batch_xs,y:batch_ys})
#
#             # accumulate the average loss
#             avg_cost+=c/total_batch
#
#         if (epoch+1)%display_step == 0:
#             print("Epoch:","%04d" % (epoch+1),"cost = ","{:.9f}".format(avg_cost))
#
#     # evaluate
#     correct_or_not = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
#     # compute accuracy
#     accuracy = tf.reduce_mean(tf.cast(correct_or_not,tf.float32))
#     print("Accuracy:",accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))
#     print("Finished!")
#
#     # save the model
#     save_path = saver.save(sess,model_path)
#     print("model saved in file:%s" % save_path)
#     print("starting 2nd session")
with tf.Session() as sess:
    # Initialize variables, then overwrite them with the trained values.
    # NOTE(review): the initializer run is redundant — saver.restore() assigns
    # every saved variable — but it is kept as a harmless safeguard.
    sess.run(tf.global_variables_initializer())
    # Restore the trained model from the checkpoint.
    saver.restore(sess, model_path)
    # Test-set accuracy: fraction of argmax predictions matching the labels.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    # Predict two sample images: `output` is the concrete digit,
    # `pred` the 10-way probability vector.
    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)
    outputval, predval = sess.run([output, pred], feed_dict={x: batch_xs})
    print(outputval, predval, batch_ys)
    # Display each sample image (deduplicated from two copy-pasted stanzas;
    # same imshow/show sequence per image as the original).
    for im in batch_xs:
        pylab.imshow(im.reshape(-1, 28))
        pylab.show()