輸出結果
TF之LoR:基于tensorflow利用邏輯回歸LoR算法實作手寫數字圖檔識别提高准确率
![](https://img.laitimes.com/img/__Qf2AjLwojIjJCLyojI0JCLicmbw5iY1MjY4YGM4QmNhVmZ2UmY4IDM0YTM3QjZhZTYzM2Nj9CX5d2bs92Yl1iclB3bsVmdlR2LcNWaw9CXt92Yu4GZjlGbh5yYjV3Lc9CX6MHc0RHaiojIsJye.png)
設計代碼
#TF之LoR:基于tensorflow實作手寫數字圖檔識别準确率
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
# Load the MNIST dataset (downloaded into ./MNIST_data on first run);
# one_hot=True encodes each digit label as a 10-dim one-hot vector.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print(mnist)

# Hyperparameters
lr = 0.001            # learning rate for gradient descent
training_iters = 100  # number of training epochs
batch_size = 100      # samples per mini-batch (full-batch training would be too heavy)
display_step = 1      # log progress every `display_step` epochs

# tf Graph inputs: 784 = 28x28 flattened pixels; 10 output classes (digits 0-9)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Model parameters: weight matrix and bias, zero-initialized
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Softmax (multinomial logistic) regression model
pred = tf.nn.softmax(tf.matmul(x, w) + b)

# Cost function: cross entropy, summed over classes, averaged over the batch
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Gradient-descent optimizer minimizing the cross-entropy cost
optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)

# Op that initializes all tf.Variables
init = tf.global_variables_initializer()

# Training loop
with tf.Session() as sess:
    sess.run(init)
    avg_cost_list = []
    for epoch in range(training_iters):  # one pass over the training data per epoch
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):  # iterate over mini-batches
            # NOTE(review): the original listing elided this loop body ("……");
            # restored with the canonical TF-1.x tutorial training step.
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch  # running mean of per-batch costs
        if (epoch + 1) % display_step == 0:  # periodic progress log
            print("疊代次數Epoch:", "%04d" % (epoch + 1),
                  "下降值cost=", "{:.9f}".format(avg_cost))
        avg_cost_list.append(avg_cost)
    print("Optimizer Finished!")
    print(avg_cost_list)

    # Evaluate accuracy on the first 3000 test images (eval needs the live session,
    # so this stays inside the `with` block)
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images[:3000],
                                      y: mnist.test.labels[:3000]}))

    # Plot the average cost per epoch
    xdata = np.linspace(0, training_iters, num=len(avg_cost_list))
    plt.figure()
    plt.plot(xdata, avg_cost_list, 'r')
    plt.xlabel('訓練輪數')
    plt.ylabel('損失函數')
    # Fixed typo: the title said "LiR" while the rest of the file uses "LoR".
    plt.title('TF之LoR:基于tensorflow實作手寫數字圖檔識别準确率——Jason Niu')
    plt.show()