TensorFlow fully connected neural network: a code implementation

Training objective

Random training data train_x is generated with NumPy; the label data train_y is produced by f(x) = sin(x) + cos(x), i.e. train_y = f(train_x).
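For concreteness, a single training pair can be drawn like this (a minimal sketch of the sampling described above; the full get_train_data function appears in the listing below):

import numpy as np

train_x = np.random.uniform(0, 2 * np.pi, (1))  # one random input in [0, 2*pi)
train_y = np.sin(train_x) + np.cos(train_x)     # label: f(train_x)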

Training results

Blue: the curve produced by the neural network after the given number of training steps.

Red: the correct graph of f(x), plotted directly with x = np.arange(0, 2 * np.pi, 0.01), y = f(x), pylab.plot(x, y).

[Figures: network output (blue) vs. the true f(x) (red), plotted after increasing numbers of training steps]

As the number of training steps increases, the network's curve gradually coincides with the true one.

The code

(The code borrows ideas from the book 《TensorFlow入門與實戰》.)

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pylab

'''
The goal of this experiment is to practice using a fully connected network.
The training inputs are generated with NumPy; the labels are np.sin(x) + np.cos(x).
Input layer: 1 unit
hidden1: 20 nodes  w1 = [1, 20],  bias1 = [1, 20],  a1 = [1, 20]   (a = x.W + bias)
hidden2: 15 nodes  w2 = [20, 15], bias2 = [1, 15],  a2 = [1, 15]
hidden3: 10 nodes  w3 = [15, 10], bias3 = [1, 10]
hidden4: 15 nodes  w4 = [10, 15], bias4 = [1, 15]
hidden5: 10 nodes  w5 = [15, 10], bias5 = [1, 10]
output layer: 1 output  w6 = [10, 1], bias6 = [1]
'''

def func(x):
    # Define the target function
    y = np.sin(x) + np.cos(x)
    return y

def get_correct_graph():
    x = np.arange(0, 2 * np.pi, 0.01) # a 1-D array of sample points over [0, 2*pi)
    x = x.reshape((len(x), 1)) # reshape into an n-row, 1-column matrix
    y = func(x)
    # Plot the reference curve
    pylab.plot(x, y, color='red', linewidth=4)

def get_train_data():
    '''Get the data used to train the network'''
    train_x = np.random.uniform(0, 2 * np.pi, (1))
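    # Note: np.random.uniform(0, 2 * np.pi, (1)) returns a length-1 ndarray,
    # i.e. a single random sample per call, so each training step below
    # uses exactly one (x, y) pair.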
    train_y = func(train_x)
    return train_x, train_y

def inference(input_data):
    '''Set up the parameters for the forward pass; parameters are initialized to values around zero'''
    with tf.variable_scope('hidden1'):
        weights = tf.get_variable('weight', [1, 20], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1, 20], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        hidden1 = tf.sigmoid(tf.multiply(input_data, weights) + biases)
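        # Note: input_data is a single scalar value, so the element-wise
        # tf.multiply broadcasts it against the [1, 20] weights; this is
        # equivalent to tf.matmul with a [1, 1] input. The deeper layers
        # receive matrix inputs and therefore use tf.matmul.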

    with tf.variable_scope('hidden2'):
        weights = tf.get_variable('weight', [20, 15], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1, 15], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        hidden2 = tf.sigmoid(tf.matmul(hidden1, weights) + biases)

    with tf.variable_scope('hidden3'):
        weights = tf.get_variable('weight', [15, 10], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1, 10], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        hidden3 = tf.sigmoid(tf.matmul(hidden2, weights) + biases)

    with tf.variable_scope('hidden4'):
        weights = tf.get_variable('weight', [10, 15], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1, 15], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        hidden4 = tf.sigmoid(tf.matmul(hidden3, weights) + biases)

    with tf.variable_scope('hidden5'):
        weights = tf.get_variable('weight', [15, 10], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1, 10], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        hidden5 = tf.sigmoid(tf.matmul(hidden4, weights) + biases)

    with tf.variable_scope('output'):
        weights = tf.get_variable('weight', [10, 1], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        biases = tf.get_variable('bias', [1], tf.float32,
                                  initializer=tf.random_normal_initializer(0.0, 1))
        output = tf.matmul(hidden5, weights) + biases
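        # No activation on the output layer: f(x) = sin(x) + cos(x) ranges over
        # [-sqrt(2), sqrt(2)], which a sigmoid (range (0, 1)) could not reach.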

    return output

def train():
    train_rate = 0.01  # learning rate
    # Use placeholders for x and y
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    y_hat = inference(x)

    loss = tf.square(y_hat - y)
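    # Squared error on a single sample; since each step feeds exactly one
    # (x, y) pair, this amounts to stochastic gradient descent with batch size 1.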
    opt = tf.train.GradientDescentOptimizer(train_rate)
    train_opt = opt.minimize(loss)

    # Initialize the variables
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Start feeding data
        print('start training....')
        for i in range(500000): # train for 500000 steps
            train_x, train_y = get_train_data()
            sess.run(train_opt, feed_dict={x: train_x,y: train_y})

            # Periodically evaluate the model trained so far
            if i % 100000 == 0:
                times = int(i / 100000)
                test_x_ndarray = np.arange(0, 2 * np.pi, 0.01)
                test_y_ndarray = np.zeros([len(test_x_ndarray)])
                ind = 0
                for test_x in test_x_ndarray:
                    test_y = sess.run(y_hat, feed_dict={x: test_x})
                    np.put(test_y_ndarray, ind, test_y)
                    ind += 1

                get_correct_graph()
                pylab.plot(test_x_ndarray, test_y_ndarray, color='blue', linewidth=3)
                plt.legend(['y', 'y_trained'], loc='lower left')
                plt.title('after trained ' + str(times * 100000) + ' times...')
                plt.show()

if __name__ == '__main__':
    train()
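The listing above uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session). For comparison, here is a minimal sketch of the same experiment with tf.keras, assuming TensorFlow 2.x is installed; the layer sizes follow the architecture described above, the SGD learning rate matches train_rate, and the sample count, batch size, and epoch count are illustrative choices, not the original author's:

import numpy as np
import tensorflow as tf

# Same architecture: 1 -> 20 -> 15 -> 10 -> 15 -> 10 -> 1, sigmoid hidden layers
model = tf.keras.Sequential([
    tf.keras.layers.Dense(20, activation='sigmoid', input_shape=(1,)),
    tf.keras.layers.Dense(15, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='sigmoid'),
    tf.keras.layers.Dense(15, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='sigmoid'),
    tf.keras.layers.Dense(1),  # linear output
])
model.compile(optimizer=tf.keras.optimizers.SGD(0.01), loss='mse')

x = np.random.uniform(0, 2 * np.pi, (10000, 1))  # random inputs
y = np.sin(x) + np.cos(x)                        # labels f(x)
model.fit(x, y, epochs=100, batch_size=32, verbose=0)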

           
