
Image captcha recognition: partial code cleanup
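
A partial cleanup of TensorFlow 1.x code for a CNN that recognizes image captchas. Several numeric values were lost from the source; typical choices are filled in below and flagged in the comments as assumptions.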

# TF 1.x code; sequence_loss_by_example lives in contrib.legacy_seq2seq
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example

# The numeric values below were elided in the source; typical choices are filled in
IMAGE_HEIGHT, IMAGE_WIDTH = 60, 160   # assumed captcha image size
char_total_num, char_len_max = 63, 4  # assumed charset size and max captcha length

X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])
Y = tf.placeholder(tf.int32, [None, char_len_max])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
w_alpha = 0.01  # weight init scale (assumed)
b_alpha = 0.1   # bias init scale (assumed)
lr = 0.001      # learning rate (assumed)

batch_size = 64  # assumed
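
Note that Y stores each character as an integer class index (shape [batch, char_len_max]) rather than a one-hot vector; that is why the loss below uses sequence_loss_by_example, which takes integer targets.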

def add_layer(input=None, w_shape=None, b_shape=None, conv2d=False, active_func=None, name=None):
    # The first two dims of w_shape are the patch size, the third is the number of
    # input channels, the last the number of output channels. A bias is added to
    # each output channel.
    with tf.name_scope(name):
        with tf.name_scope('weights'):
            w = tf.Variable(w_alpha * tf.random_normal(w_shape))
            #tf.summary.histogram(name+'/weights',w)
        with tf.name_scope('biases'):
            b = tf.Variable(b_alpha * tf.random_normal(b_shape))
            #tf.summary.histogram(name+'/biases',b)
        # Convolution + pooling layer (stride/ksize values were elided in the source;
        # a stride-1 conv with a 2x2, stride-2 max-pool is the usual choice)
        if conv2d and active_func:
            conv = active_func(tf.nn.bias_add(tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME'), b))
            conv = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            conv = tf.nn.dropout(conv, keep_prob=keep_prob)
            #tf.summary.histogram(name + '/conv', conv)
            return conv
        # Fully connected layer
        elif active_func:
            dense = tf.reshape(input, [-1, w_shape[0]])
            dense = active_func(tf.add(tf.matmul(dense, w), b))
            dense = tf.nn.dropout(dense, keep_prob=keep_prob)
            #tf.summary.histogram(name+'/dense',dense)
            return dense
        # Output layer
        else:
            out = tf.add(tf.matmul(input, w), b)
            #tf.summary.histogram(name+'/output',out)
            return out


# Define the model
def model():
    # Reshape the input X into a 4-D tensor to match the conv layers: dims 2 and 3
    # are the image height and width, the last dim is the number of color channels.
    x = tf.reshape(X, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])

    # In w_shape the first two dims are the patch size, the third is the number of
    # input channels, the last the number of output channels; a bias is added to each
    # output channel. The filter sizes were elided in the source; 3x3 patches with
    # 32/64/64 output channels are a typical choice.
    # First layer
    layer1 = add_layer(input=x, w_shape=[3, 3, 1, 32], b_shape=[32], conv2d=True, active_func=tf.nn.relu, name='layer1')
    # Second layer
    layer2 = add_layer(input=layer1, w_shape=[3, 3, 32, 64], b_shape=[64], conv2d=True, active_func=tf.nn.relu,
                       name='layer2')
    # Third layer
    layer3 = add_layer(input=layer2, w_shape=[3, 3, 64, 64], b_shape=[64], conv2d=True, active_func=tf.nn.relu,
                       name='layer3')
    # Fully connected layer: a 60x160 input shrinks to 8x20 after three 2x2 'SAME'
    # poolings (60->30->15->8, 160->80->40->20); sizes were elided in the source
    layer_full = add_layer(input=layer3, w_shape=[8 * 20 * 64, 1024], b_shape=[1024], active_func=tf.nn.relu,
                           name='layer_full')
    # Output layer
    layer_out = add_layer(input=layer_full, w_shape=[1024, char_total_num*char_len_max],
                          b_shape=[char_total_num*char_len_max], name='layer_out')
    return layer_out
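
model() returns a logits tensor of shape [batch, char_total_num * char_len_max]: one char_total_num-way classification per character slot, which the training code below reshapes accordingly.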


# Training
def train_model():
    #tf.summary.scalar('accuracy',accuracy)
    pred = model()
    # Loss: treat the output as char_len_max independent per-character predictions
    pred = tf.reshape(pred, [-1, char_total_num])
    target = tf.reshape(Y, [-1])
    loss = sequence_loss_by_example([pred], [target], [tf.ones_like(target, dtype=tf.float32)])
    cost = tf.reduce_sum(loss) / batch_size
    train_op = tf.train.AdamOptimizer(lr).minimize(cost)
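
The source breaks off here. A minimal sketch of how the body of train_model might continue, assuming a hypothetical get_next_batch(batch_size) helper that returns a batch of flattened images and integer labels:

    # Per-character accuracy: argmax over each character slot vs. the integer label
    max_idx_pred = tf.argmax(tf.reshape(pred, [-1, char_len_max, char_total_num]), 2)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(max_idx_pred, tf.int32), Y), tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(10000):
            batch_x, batch_y = get_next_batch(batch_size)  # hypothetical data helper
            _, cost_ = sess.run([train_op, cost],
                                feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            if step % 100 == 0:
                test_x, test_y = get_next_batch(batch_size)
                acc = sess.run(accuracy, feed_dict={X: test_x, Y: test_y, keep_prob: 1.0})
                print(step, cost_, acc)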
           
