
TensorFlow Visualization Programming: installing TensorFlow 1.0, visualizing an addition operation as a graph, implementing simple linear regression, adding variable scopes to the program, and saving/restoring a model (saving session resources).

Installing TensorFlow 1.0

Linux/Ubuntu:

  • python2.7:
pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl
           
  • python3.5:
pip3 install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp35-cp35m-linux_x86_64.whl 
           

macOS:

  • python2:
pip install https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl
           
  • python3:
pip3 install https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl
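
After installation, a quick sanity check (a minimal sketch, not part of the original tutorial) is to print the installed version from the interpreter; it should report 1.0.1 if one of the wheels above was used:

import tensorflow as tf

# Print the installed TensorFlow version
print(tf.__version__)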
           

Addition with TensorFlow

import tensorflow as tf
# Suppress TensorFlow warnings (not needed when installed from source)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

a = tf.constant(3.0)
b = tf.constant(4.0)

with tf.Session() as sess:
    a_b = tf.add(a, b)
    print("Type of the sum:")
    print(a_b)
    print("The actual result:")
    print(sess.run(a_b))
           
Output of the addition example (tf_add)

Visualizing the Addition Operation as a Graph

  • Add a statement inside the session that writes an event (log) file
import tensorflow as tf
# Suppress TensorFlow warnings (not needed when installed from source)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

a = tf.constant(3.0)
b = tf.constant(4.0)

with tf.Session() as sess:
    a_b = tf.add(a, b)
    print("Type of the sum:")
    print(a_b)
    print("The actual result:")
    print(sess.run(a_b))
    # Write a TensorBoard event file for this graph
    file_writer = tf.summary.FileWriter('/Users/lijianzhao/tensorBoard/', graph=sess.graph)
           
  • Run in the terminal

    tensorboard --logdir="/Users/lijianzhao/tensorBoard/"

Running tensorboard in the terminal
  • Following the terminal's prompt, open the address in a browser

    http://192.168.199.213:6006

TensorBoard main page
  • Select GRAPHS

Implementing Simple Linear Regression

import tensorflow as tf
# Suppress TensorFlow warnings (not needed when installed from source)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Regression function
def my_regression():

    # Prepare 100 samples of x with mean 5.0 and standard deviation 1.0
    x = tf.random_normal([100, 1], mean=5.0, stddev=1.0, name="x")
    # The true relationship is y = 0.7x + 0.6
    y_true = tf.matmul(x, [[0.7]]) + 0.6

    # Create the weight variable
    weight = tf.Variable(tf.random_normal([1, 1], mean=1.0, stddev=0.1), name="weight")

    # Create the bias variable with an initial value of 1
    bias = tf.Variable(1.0, name="bias")

    # Predicted values
    y_predict = tf.matmul(x, weight) + bias

    # Compute the loss (mean squared error)
    loss = tf.reduce_mean(tf.square(y_predict - y_true))

    # Reduce the loss with gradient descent, learning rate 0.01
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    # Collect variables for TensorBoard
    tf.summary.scalar("losses", loss)
    tf.summary.histogram("weights", weight)

    # Merge all summaries
    merged = tf.summary.merge_all()

    # Initialize variables
    init_op = tf.global_variables_initializer()

    # Optimize the loss with gradient descent
    with tf.Session() as sess:
        sess.run(init_op)

        print("Initial weight: {}, initial bias: {}".format(weight.eval(), bias.eval()))

        # Write a TensorBoard event file for this graph
        file_writer = tf.summary.FileWriter('/Users/lijianzhao/tensorBoard/my_regression', graph=sess.graph)


        # Training loop for the linear regression model
        for i in range(20000):
            sess.run(train_op)
            print("Step {}: weight {}, bias {}".format(i, weight.eval(), bias.eval()))

            # Track how the values change at each step
            # Run the merged summary op
            summary = sess.run(merged)
            # Append the collected values to the event file
            file_writer.add_summary(summary, i)


if __name__ == '__main__':
    my_regression()
           

Run results

Program graph (TensorBoard)

The loss value decreases
The weights gradually approach the true values
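
For reference (this note is not in the original post), the loss the script minimizes is the mean squared error over the batch, and each GradientDescentOptimizer step moves the parameters against the gradient of that loss with learning rate η = 0.01, which is why the weight and bias drift toward the true values 0.7 and 0.6:

$$
L(w, b) = \frac{1}{N}\sum_{i=1}^{N}\left(w x_i + b - y_i\right)^2,
\qquad
w \leftarrow w - \eta \frac{\partial L}{\partial w},
\quad
b \leftarrow b - \eta \frac{\partial L}{\partial b}
$$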

Adding Variable Scopes to the Program

import tensorflow as tf
# Suppress TensorFlow warnings (not needed when installed from source)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Regression function
def my_regression():

    # Prepare the data
    with tf.variable_scope("data"):
        # Prepare 100 samples of x with mean 5.0 and standard deviation 1.0
        x = tf.random_normal([100, 1], mean=5.0, stddev=1.0, name="x")
        # The true relationship is y = 0.7x + 0.6
        y_true = tf.matmul(x, [[0.7]]) + 0.6

    # Build the model
    with tf.variable_scope("model"):
        # Create the weight variable
        weight = tf.Variable(tf.random_normal([1, 1], mean=1.0, stddev=0.1), name="weight")

        # Create the bias variable with an initial value of 1
        bias = tf.Variable(1.0, name="bias")

        # Predicted values
        y_predict = tf.matmul(x, weight) + bias

    # Compute the loss
    with tf.variable_scope("loss"):
        # Mean squared error
        loss = tf.reduce_mean(tf.square(y_predict - y_true))

    # Reduce the loss
    with tf.variable_scope("optimizer"):
        # Gradient descent, learning rate 0.01
        train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    # Collect variables for TensorBoard
    tf.summary.scalar("losses", loss)
    tf.summary.histogram("weights", weight)

    # Merge all summaries
    merged = tf.summary.merge_all()

    # Initialize variables
    init_op = tf.global_variables_initializer()

    # Optimize the loss with gradient descent
    with tf.Session() as sess:
        sess.run(init_op)
        print("Initial weight: {}, initial bias: {}".format(weight.eval(), bias.eval()))
        # Write a TensorBoard event file for this graph
        file_writer = tf.summary.FileWriter('/Users/lijianzhao/tensorBoard/my_regression', graph=sess.graph)
        # Training loop for the linear regression model
        for i in range(20000):
            sess.run(train_op)
            print("Step {}: weight {}, bias {}".format(i, weight.eval(), bias.eval()))
            # Track how the values change at each step
            # Run the merged summary op
            summary = sess.run(merged)
            # Append the collected values to the event file
            file_writer.add_summary(summary, i)

if __name__ == '__main__':
    my_regression()

           
Graph after adding scopes

Saving and Restoring a Model (Saving Session Resources)

  • Create a saver for saving the model
saver = tf.train.Saver()
           
  • Save the model
saver.save(sess, "./tmp/ckpt/test")
           
  • Restore the model
saver.restore(sess, "./tmp/ckpt/test")
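
Putting the three fragments together, here is a minimal sketch (the checkpoint directory ./tmp/ckpt/ and the toy variables are illustrative, not from the original post) of restoring a previous checkpoint when one exists and saving the session's variables afterwards:

import os
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# A toy model: one weight and one bias, as in the regression example above
weight = tf.Variable(tf.random_normal([1, 1], mean=1.0, stddev=0.1), name="weight")
bias = tf.Variable(1.0, name="bias")

init_op = tf.global_variables_initializer()

# Create the saver after all variables have been defined
saver = tf.train.Saver()

# Make sure the checkpoint directory exists before saving
if not os.path.exists("./tmp/ckpt"):
    os.makedirs("./tmp/ckpt")

with tf.Session() as sess:
    sess.run(init_op)

    # If a checkpoint already exists, restore the saved weight and bias
    checkpoint = tf.train.latest_checkpoint("./tmp/ckpt")
    if checkpoint:
        saver.restore(sess, checkpoint)

    # ... run training steps here ...

    # Save the session's variables under ./tmp/ckpt/
    saver.save(sess, "./tmp/ckpt/test")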