天天看點

tf.keras入門示例:Lenet手寫字元分類(2擴充) eager模式下Sequence生成器方式加載資料并訓練

  • Sequence方式擴充自己的自定義資料集加載, 參考連結: https://blog.csdn.net/m0_37477175/article/details/79716312
  • eager模式下使用tf.keras訓練和評估方式

源代碼

# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time
import argparse
import math

import tensorflow as tf
from tensorflow.keras.utils import Sequence
from tensorflow.data import Dataset
from tensorflow.contrib.eager.python import datasets
import tensorflow.contrib.eager as tfe
import numpy as np

"""兩種方式加載的資料集不同圖像部分資料是不同的,
official.mnist: 加載的圖像是uint8資料類型編碼, /255. 需要歸一化
tensorflow.examples.tutorials.mnist 是float類型編碼, 無需歸一化操作
"""
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import MaxPooling2D, Conv2D, Input, Flatten, Dense, Dropout

# 立即執行模式
tf.enable_eager_execution()

"""
Sequence生成器的方法
__iter__()
__getitem__(index)
支援batch
"""
class DataGenerator(Sequence):
    """Custom dataset loader built on keras.utils.Sequence (batch-aware).

    Here ``x``/``y`` are in-memory arrays, but they could just as well be
    file lists or any other handle — ``data_generation`` is the hook where
    real loading / preprocessing would happen.
    """

    def __init__(self, x, y, batch_size=200, shuffle=True):
        # NOTE(review): the original default batch size was lost from the
        # source; 200 matches the run log (60000 samples / 300 steps).
        self.batch_size = batch_size
        self.x, self.y = x, y
        # Index permutation, reshuffled between epochs when shuffle=True.
        self.indexes = np.arange(len(self.x))
        self.shuffle = shuffle

    def __len__(self):
        # Number of batches per epoch; the last batch may be short.
        # int() keeps the return type an integer under Python 2 as well.
        return int(math.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, index):
        # Take the batch_size indices belonging to batch `index`...
        batch_indexs = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # ...and gather the corresponding samples.
        x, y = self.data_generation(batch_indexs)
        return x, y

    def on_epoch_end(self):
        # Reshuffle the index permutation at the end of every epoch.
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def data_generation(self, batch_indexs):
        # Fetch samples by (possibly shuffled) indices.
        x = self.x[batch_indexs]
        y = self.y[batch_indexs]
        return x, y

class LeNet(Model):
    """LeNet-style CNN for MNIST built with the Keras functional API.

    The layer graph is assembled first, then handed to the functional
    ``Model(inputs, outputs, name=...)`` constructor via ``super().__init__``,
    so instances behave like an ordinary functional Model.
    """

    def __init__(self, input_shape=(28, 28, 1), num_classes=10):
        # NOTE(review): the numeric literals below were stripped from the
        # source; values follow the standard LeNet MNIST configuration and
        # are consistent with the 28x28x1 inputs printed in the run log.
        self.num_classes = num_classes
        ''' Define the layers used by the network. '''
        # Input layer
        img_input = Input(shape=input_shape)

        # Conv => ReLu => Pool
        x = Conv2D(filters=20, kernel_size=5, padding="same", activation="relu", name='block1_conv1')(img_input)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)
        # Conv => ReLu => Pool
        x = Conv2D(filters=50, kernel_size=5, padding="same", activation="relu", name='block1_conv2')(x)
        # layer name kept as-is ('poo2') so saved checkpoints stay loadable
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_poo2')(x)
        # Flatten to a 1-D feature vector
        x = Flatten(name='flatten')(x)
        # Fully connected layer
        x = Dense(units=500, activation="relu", name="f1")(x)
        # Softmax classifier head
        x = Dense(units=num_classes, activation="softmax", name="prediction")(x)

        # Delegate to the functional Model(inputs, outputs, name=...) ctor.
        super(LeNet, self).__init__(img_input, x, name="LeNet")

# 自定義損失函數
def loss(logits, labels):
    """Mean softmax cross-entropy over the batch.

    ``softmax_cross_entropy_with_logits_v2`` produces one loss value per
    example (a length-N vector for a batch of N); ``reduce_mean`` then
    collapses that vector to a single scalar.
    """
    per_example = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
    return tf.reduce_mean(per_example)

# 自定義評估函數
def compute_accuracy(logits, labels):
    """Fraction of correct predictions in a batch (one-hot labels).

    Both logits and labels are (batch, num_classes); argmax along axis 1
    recovers the predicted / true class ids.
    """
    predictions = tf.argmax(input=logits, axis=1, output_type=tf.int64)
    # labels are one-hot encoded (see load_mnist), so argmax gives class ids
    labels = tf.argmax(input=labels, axis=1, output_type=tf.int64)
    batch_size = int(logits.shape[0])
    return tf.reduce_sum(tf.cast(tf.equal(predictions, labels), dtype=tf.float32)) / batch_size



def run_mnist_eager(cfg):
    """Train and evaluate LeNet on MNIST in eager mode via Sequence generators.

    Restores model/optimizer state from the latest checkpoint under
    ``cfg.model_dir`` when one exists, and saves a checkpoint after each
    epoch.

    Args:
        cfg: parsed argparse namespace (see ``arg_parse``) providing lr,
            momentum, batch_size, train_epochs, model_dir, etc.
    """
    # Pick device and data format automatically.
    (device, data_format) = ('/gpu:0', 'channels_first')
    if not tf.test.is_gpu_available():
        (device, data_format) = ('/cpu:0', 'channels_last')

    print('Using device %s, and data format %s.' % (device, data_format))
    # Load the dataset as (images, one-hot labels) tuples.
    train_ds, test_ds = load_mnist()

    # Build model and optimizer.
    model = LeNet()
    optimizer = tf.train.MomentumOptimizer(cfg.lr, cfg.momentum)
    model.compile(optimizer=optimizer,
                  loss=loss,                    # custom loss; 'categorical_crossentropy' works too
                  metrics=[compute_accuracy])   # custom metric; 'accuracy' works too

    # Create and restore checkpoint (if one exists on the path).
    checkpoint_prefix = os.path.join(cfg.model_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tfe.Checkpoint(model=model, optimizer=optimizer, step_counter=step_counter)
    # Restore parameters from the latest checkpoint file, if present.
    checkpoint.restore(tf.train.latest_checkpoint(cfg.model_dir))

    # Wrap the raw arrays in Sequence generators (eager-compatible).
    # Batch size comes from the CLI instead of a hard-coded constant.
    train_gen = DataGenerator(train_ds[0], train_ds[1], batch_size=cfg.batch_size)
    test_gen = DataGenerator(test_ds[0], test_ds[1], batch_size=cfg.batch_size)
    # Train and evaluate for a set number of epochs.
    with tf.device(device):  # required for the GPU to actually be used
        for _ in range(cfg.train_epochs):
            start = time.time()

            # One pass over the training generator per outer iteration.
            model.fit_generator(generator=train_gen, epochs=1)

            # verbose=1 shows the evaluation progress bar (as in the log).
            _loss, _acc = model.evaluate_generator(generator=test_gen, verbose=1)
            print("test dataset loss: %f acc: %f" % (_loss, _acc))
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                    (checkpoint.save_counter.numpy() + 1, step_counter.numpy(), end - start))

            checkpoint.save(checkpoint_prefix)

def arg_parse():
    """Define and parse command-line arguments.

    Returns:
        argparse.Namespace with lr, momentum, data_dir, model_dir,
        batch_size, train_epochs and log_interval.

    NOTE(review): the original default values were lost from the source;
    the ones below are reconstructed from the run log (batch 200, 4 epochs)
    and the standard eager-MNIST example (lr=0.01, momentum=0.5) — confirm.
    """
    parser = argparse.ArgumentParser(description="Lenet-5 MNIST 模型")
    parser.add_argument("--lr", dest="lr", help="學習率", default=0.01, type=float)
    # type=float added: without it a CLI-supplied momentum stays a string.
    parser.add_argument("--momentum", dest="momentum", help="SGD momentum.", default=0.5, type=float)

    parser.add_argument("--data_dir", dest="data_dir", help="資料集下載下傳/儲存目錄", default="data/mnist/input_data/")
    parser.add_argument("--model_dir", dest="model_dir", help="模型儲存目錄", default="data/mnist/checkpoints/")
    parser.add_argument("--batch_size", dest="batch_size", help="訓練或測試時 Batch Size", default=200, type=int)
    parser.add_argument("--train_epochs", dest="train_epochs", help="訓練時epoch疊代次數", default=4, type=int)
    parser.add_argument("--log_interval", dest="log_interval", help="日志列印間隔", default=10, type=int)

    # Return the parsed (and type-converted) result.
    return parser.parse_args()

def load_mnist():
    """Load MNIST, normalize images to [0, 1] and one-hot encode labels.

    Returns:
        (train_ds, test_ds): two (images, labels) tuples, with images of
        shape (N, 28, 28, 1) float32 and labels one-hot over 10 classes.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # keras mnist images are uint8 in [0, 255]; scale to float32 in [0, 1].
    x_train = x_train.astype("float32") / 255.
    x_test = x_test.astype("float32") / 255.

    # Append a channel axis: (N, 28, 28) -> (N, 28, 28, 1).
    x_train = x_train[:, :, :, None]
    x_test = x_test[:, :, :, None]

    print(x_train.shape, "train shape")
    print(x_test.shape, "test shape")

    # One-hot encode the 10 digit classes.
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    train_ds = (x_train, y_train)
    test_ds = (x_test, y_test)

    return train_ds, test_ds

if __name__ == '__main__':
    # Parse CLI flags and launch eager-mode training/evaluation.
    run_mnist_eager(arg_parse())

           

運作結果

2018-09-10 16:50:41: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2018-09-10 16:50:42.035425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1405] Found device 0 with properties: 
name: TITAN Xp major: 6 minor: 1 memoryClockRate(GHz): 1.582
pciBusID: 0000:85:00.0
totalMemory: 11.90GiB freeMemory: 11.74GiB
2018-09-10 16:50:42.035472: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1484] Adding visible gpu devices: 0
2018-09-10 16:50:42.494465: I tensorflow/core/common_runtime/gpu/gpu_device.cc:965] Device interconnect StreamExecutor with strength 1 edge matrix:
2018-09-10 16:50:42.494531: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971]      0 
2018-09-10 16:50:42.494539: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] 0:   N 
2018-09-10 16:50:42.498417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1097] Created TensorFlow device (/device:GPU:0 with 11355 MB memory) -> physical GPU (device: 0, name: TITAN Xp, pci bus id: 0000:85:00.0, compute capability: 6.1)
Using device /gpu:0, and data format channels_first.
(60000, 28, 28, 1) train shape
(10000, 28, 28, 1) test shape
2018-09-10 16:50:43.133449: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1484] Adding visible gpu devices: 0
2018-09-10 16:50:43.133544: I tensorflow/core/common_runtime/gpu/gpu_device.cc:965] Device interconnect StreamExecutor with strength 1 edge matrix:
2018-09-10 16:50:43.133554: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971]      0 
2018-09-10 16:50:43.133578: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] 0:   N 
2018-09-10 16:50:43.133817: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1097] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 11355 MB memory) -> physical GPU (device: 0, name: TITAN Xp, pci bus id: 0000:85:00.0, compute capability: 6.1)
Epoch 1/1
  1/300 [..............................] - ETA: 6:29 - loss: 2.3625 - compute_accuracy: 0.0850
  6/300 [..............................] - ETA: 1:06 - loss: 2.3624 - compute_accuracy: 0.0883
 10/300 [>.............................] - ETA: 40s - loss: 2.3619 - compute_accuracy: 0.1030 
 16/300 [>.............................] - ETA: 26s - loss: 2.3620 - compute_accuracy: 0.0934
 21/300 [=>............................] - ETA: 20s - loss: 2.3619 - compute_accuracy: 0.0967
 27/300 [=>............................] - ETA: 15s - loss: 2.3619 - compute_accuracy: 0.0989
 32/300 [==>...........................] - ETA: 13s - loss: 2.3617 - compute_accuracy: 0.1027
 37/300 [==>...........................] - ETA: 11s - loss: 2.3617 - compute_accuracy: 0.1022
 42/300 [===>..........................] - ETA: 10s - loss: 2.3615 - compute_accuracy: 0.1060
 47/300 [===>..........................] - ETA: 9s - loss: 2.3615 - compute_accuracy: 0.1086 
 53/300 [====>.........................] - ETA: 8s - loss: 2.3613 - compute_accuracy: 0.1125
 59/300 [====>.........................] - ETA: 7s - loss: 2.3612 - compute_accuracy: 0.1154
 64/300 [=====>........................] - ETA: 7s - loss: 2.3611 - compute_accuracy: 0.1176
 70/300 [======>.......................] - ETA: 6s - loss: 2.3610 - compute_accuracy: 0.1211
 74/300 [======>.......................] - ETA: 6s - loss: 2.3609 - compute_accuracy: 0.1239
 79/300 [======>.......................] - ETA: 5s - loss: 2.3608 - compute_accuracy: 0.1278
 84/300 [=======>......................] - ETA: 5s - loss: 2.3607 - compute_accuracy: 0.1322
 89/300 [=======>......................] - ETA: 5s - loss: 2.3606 - compute_accuracy: 0.1347
 94/300 [========>.....................] - ETA: 5s - loss: 2.3605 - compute_accuracy: 0.1389
100/300 [=========>....................] - ETA: 4s - loss: 2.3604 - compute_accuracy: 0.1428
106/300 [=========>....................] - ETA: 4s - loss: 2.3603 - compute_accuracy: 0.1461
110/300 [==========>...................] - ETA: 4s - loss: 2.3602 - compute_accuracy: 0.1485
114/300 [==========>...................] - ETA: 4s - loss: 2.3601 - compute_accuracy: 0.1507
119/300 [==========>...................] - ETA: 3s - loss: 2.3601 - compute_accuracy: 0.1536
123/300 [===========>..................] - ETA: 3s - loss: 2.3600 - compute_accuracy: 0.1563
129/300 [===========>..................] - ETA: 3s - loss: 2.3599 - compute_accuracy: 0.1605
134/300 [============>.................] - ETA: 3s - loss: 2.3598 - compute_accuracy: 0.1644
140/300 [=============>................] - ETA: 3s - loss: 2.3597 - compute_accuracy: 0.1671
145/300 [=============>................] - ETA: 3s - loss: 2.3596 - compute_accuracy: 0.1708
151/300 [==============>...............] - ETA: 2s - loss: 2.3594 - compute_accuracy: 0.1750
156/300 [==============>...............] - ETA: 2s - loss: 2.3593 - compute_accuracy: 0.1785
162/300 [===============>..............] - ETA: 2s - loss: 2.3591 - compute_accuracy: 0.1820
168/300 [===============>..............] - ETA: 2s - loss: 2.3590 - compute_accuracy: 0.1868
174/300 [================>.............] - ETA: 2s - loss: 2.3588 - compute_accuracy: 0.1917
180/300 [=================>............] - ETA: 2s - loss: 2.3587 - compute_accuracy: 0.1958
185/300 [=================>............] - ETA: 2s - loss: 2.3586 - compute_accuracy: 0.1999
191/300 [==================>...........] - ETA: 1s - loss: 2.3584 - compute_accuracy: 0.2055
196/300 [==================>...........] - ETA: 1s - loss: 2.3583 - compute_accuracy: 0.2090
201/300 [===================>..........] - ETA: 1s - loss: 2.3582 - compute_accuracy: 0.2123
207/300 [===================>..........] - ETA: 1s - loss: 2.3581 - compute_accuracy: 0.2165
212/300 [====================>.........] - ETA: 1s - loss: 2.3579 - compute_accuracy: 0.2199
216/300 [====================>.........] - ETA: 1s - loss: 2.3579 - compute_accuracy: 0.2221
222/300 [=====================>........] - ETA: 1s - loss: 2.3577 - compute_accuracy: 0.2261
228/300 [=====================>........] - ETA: 1s - loss: 2.3576 - compute_accuracy: 0.2296
232/300 [======================>.......] - ETA: 1s - loss: 2.3575 - compute_accuracy: 0.2324
236/300 [======================>.......] - ETA: 1s - loss: 2.3573 - compute_accuracy: 0.2350
241/300 [=======================>......] - ETA: 0s - loss: 2.3572 - compute_accuracy: 0.2379
247/300 [=======================>......] - ETA: 0s - loss: 2.3571 - compute_accuracy: 0.2411
253/300 [========================>.....] - ETA: 0s - loss: 2.3569 - compute_accuracy: 0.2454
257/300 [========================>.....] - ETA: 0s - loss: 2.3568 - compute_accuracy: 0.2478
260/300 [=========================>....] - ETA: 0s - loss: 2.3567 - compute_accuracy: 0.2490
266/300 [=========================>....] - ETA: 0s - loss: 2.3565 - compute_accuracy: 0.2524
272/300 [==========================>...] - ETA: 0s - loss: 2.3563 - compute_accuracy: 0.2555
277/300 [==========================>...] - ETA: 0s - loss: 2.3562 - compute_accuracy: 0.2576
282/300 [===========================>..] - ETA: 0s - loss: 2.3560 - compute_accuracy: 0.2598
286/300 [===========================>..] - ETA: 0s - loss: 2.3559 - compute_accuracy: 0.2617
290/300 [============================>.] - ETA: 0s - loss: 2.3557 - compute_accuracy: 0.2634
295/300 [============================>.] - ETA: 0s - loss: 2.3555 - compute_accuracy: 0.2649
300/300 [==============================] - 4s 15ms/step - loss: 2.3553 - compute_accuracy: 0.2668
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
21/50 [===========>..................] - ETA: 0s
31/50 [=================>............] - ETA: 0s
42/50 [========================>.....] - ETA: 0s
50/50 [==============================] - 0s 5ms/step
test dataset loss: 2.343393 acc: 0.370100

Train time for epoch #1 (0 total steps): 4.813967
Epoch 1/1
  1/300 [..............................] - ETA: 5s - loss: 2.3444 - compute_accuracy: 0.3750
  6/300 [..............................] - ETA: 3s - loss: 2.3428 - compute_accuracy: 0.3792
 10/300 [>.............................] - ETA: 3s - loss: 2.3428 - compute_accuracy: 0.3675
 14/300 [>.............................] - ETA: 3s - loss: 2.3422 - compute_accuracy: 0.3614
 19/300 [>.............................] - ETA: 3s - loss: 2.3418 - compute_accuracy: 0.3508
 24/300 [=>............................] - ETA: 3s - loss: 2.3416 - compute_accuracy: 0.3412
 30/300 [==>...........................] - ETA: 3s - loss: 2.3409 - compute_accuracy: 0.3407
 36/300 [==>...........................] - ETA: 2s - loss: 2.3400 - compute_accuracy: 0.3392
 41/300 [===>..........................] - ETA: 2s - loss: 2.3394 - compute_accuracy: 0.3357
 47/300 [===>..........................] - ETA: 2s - loss: 2.3385 - compute_accuracy: 0.3290
 52/300 [====>.........................] - ETA: 2s - loss: 2.3376 - compute_accuracy: 0.3224
 57/300 [====>.........................] - ETA: 2s - loss: 2.3369 - compute_accuracy: 0.3169
 63/300 [=====>........................] - ETA: 2s - loss: 2.3359 - compute_accuracy: 0.3123
 69/300 [=====>........................] - ETA: 2s - loss: 2.3347 - compute_accuracy: 0.3064
 74/300 [======>.......................] - ETA: 2s - loss: 2.3340 - compute_accuracy: 0.2997
 79/300 [======>.......................] - ETA: 2s - loss: 2.3329 - compute_accuracy: 0.2946
 85/300 [=======>......................] - ETA: 2s - loss: 2.3316 - compute_accuracy: 0.2901
 91/300 [========>.....................] - ETA: 2s - loss: 2.3300 - compute_accuracy: 0.2848
 97/300 [========>.....................] - ETA: 2s - loss: 2.3279 - compute_accuracy: 0.2820
103/300 [=========>....................] - ETA: 2s - loss: 2.3263 - compute_accuracy: 0.2792
110/300 [==========>...................] - ETA: 1s - loss: 2.3240 - compute_accuracy: 0.2769
116/300 [==========>...................] - ETA: 1s - loss: 2.3220 - compute_accuracy: 0.2750
121/300 [===========>..................] - ETA: 1s - loss: 2.3196 - compute_accuracy: 0.2747
126/300 [===========>..................] - ETA: 1s - loss: 2.3175 - compute_accuracy: 0.2743
130/300 [============>.................] - ETA: 1s - loss: 2.3157 - compute_accuracy: 0.2749
135/300 [============>.................] - ETA: 1s - loss: 2.3136 - compute_accuracy: 0.2754
139/300 [============>.................] - ETA: 1s - loss: 2.3119 - compute_accuracy: 0.2757
144/300 [=============>................] - ETA: 1s - loss: 2.3094 - compute_accuracy: 0.2760
150/300 [==============>...............] - ETA: 1s - loss: 2.3064 - compute_accuracy: 0.2766
156/300 [==============>...............] - ETA: 1s - loss: 2.3026 - compute_accuracy: 0.2797
162/300 [===============>..............] - ETA: 1s - loss: 2.2988 - compute_accuracy: 0.2827
168/300 [===============>..............] - ETA: 1s - loss: 2.2948 - compute_accuracy: 0.2858
174/300 [================>.............] - ETA: 1s - loss: 2.2904 - compute_accuracy: 0.2899
179/300 [================>.............] - ETA: 1s - loss: 2.2864 - compute_accuracy: 0.2941
184/300 [=================>............] - ETA: 1s - loss: 2.2827 - compute_accuracy: 0.2983
190/300 [==================>...........] - ETA: 1s - loss: 2.2773 - compute_accuracy: 0.3047
195/300 [==================>...........] - ETA: 1s - loss: 2.2735 - compute_accuracy: 0.3088
200/300 [===================>..........] - ETA: 1s - loss: 2.2689 - compute_accuracy: 0.3142
206/300 [===================>..........] - ETA: 0s - loss: 2.2633 - compute_accuracy: 0.3201
212/300 [====================>.........] - ETA: 0s - loss: 2.2570 - compute_accuracy: 0.3267
218/300 [====================>.........] - ETA: 0s - loss: 2.2509 - compute_accuracy: 0.3325
224/300 [=====================>........] - ETA: 0s - loss: 2.2450 - compute_accuracy: 0.3377
230/300 [======================>.......] - ETA: 0s - loss: 2.2393 - compute_accuracy: 0.3425
236/300 [======================>.......] - ETA: 0s - loss: 2.2336 - compute_accuracy: 0.3473
242/300 [=======================>......] - ETA: 0s - loss: 2.2273 - compute_accuracy: 0.3524
248/300 [=======================>......] - ETA: 0s - loss: 2.2215 - compute_accuracy: 0.3574
253/300 [========================>.....] - ETA: 0s - loss: 2.2164 - compute_accuracy: 0.3626
259/300 [========================>.....] - ETA: 0s - loss: 2.2098 - compute_accuracy: 0.3691
265/300 [=========================>....] - ETA: 0s - loss: 2.2036 - compute_accuracy: 0.3748
270/300 [==========================>...] - ETA: 0s - loss: 2.1986 - compute_accuracy: 0.3793
275/300 [==========================>...] - ETA: 0s - loss: 2.1934 - compute_accuracy: 0.3839
281/300 [===========================>..] - ETA: 0s - loss: 2.1871 - compute_accuracy: 0.3893
287/300 [===========================>..] - ETA: 0s - loss: 2.1811 - compute_accuracy: 0.3943
294/300 [============================>.] - ETA: 0s - loss: 2.1742 - compute_accuracy: 0.4002
298/300 [============================>.] - ETA: 0s - loss: 2.1705 - compute_accuracy: 0.4031
300/300 [==============================] - 3s 10ms/step - loss: 2.1686 - compute_accuracy: 0.4046
 1/50 [..............................] - ETA: 0s
11/50 [=====>........................] - ETA: 0s
18/50 [=========>....................] - ETA: 0s
27/50 [===============>..............] - ETA: 0s
36/50 [====================>.........] - ETA: 0s
46/50 [==========================>...] - ETA: 0s
50/50 [==============================] - 0s 6ms/step
test dataset loss: 1.871826 acc: 0.650700

Train time for epoch #2 (0 total steps): 3.449475
Epoch 1/1
  1/300 [..............................] - ETA: 5s - loss: 1.8883 - compute_accuracy: 0.6350
  5/300 [..............................] - ETA: 4s - loss: 1.8871 - compute_accuracy: 0.6280
 10/300 [>.............................] - ETA: 3s - loss: 1.8768 - compute_accuracy: 0.6360
 16/300 [>.............................] - ETA: 3s - loss: 1.8804 - compute_accuracy: 0.6353
 22/300 [=>............................] - ETA: 3s - loss: 1.8771 - compute_accuracy: 0.6395
 27/300 [=>............................] - ETA: 3s - loss: 1.8742 - compute_accuracy: 0.6404
 32/300 [==>...........................] - ETA: 3s - loss: 1.8694 - compute_accuracy: 0.6464
 36/300 [==>...........................] - ETA: 3s - loss: 1.8647 - compute_accuracy: 0.6521
 41/300 [===>..........................] - ETA: 3s - loss: 1.8601 - compute_accuracy: 0.6593
 46/300 [===>..........................] - ETA: 2s - loss: 1.8593 - compute_accuracy: 0.6605
 51/300 [====>.........................] - ETA: 2s - loss: 1.8554 - compute_accuracy: 0.6644
 56/300 [====>.........................] - ETA: 2s - loss: 1.8498 - compute_accuracy: 0.6700
 61/300 [=====>........................] - ETA: 2s - loss: 1.8462 - compute_accuracy: 0.6739
 67/300 [=====>........................] - ETA: 2s - loss: 1.8425 - compute_accuracy: 0.6775
 73/300 [======>.......................] - ETA: 2s - loss: 1.8385 - compute_accuracy: 0.6814
 79/300 [======>.......................] - ETA: 2s - loss: 1.8361 - compute_accuracy: 0.6828
 85/300 [=======>......................] - ETA: 2s - loss: 1.8329 - compute_accuracy: 0.6852
 91/300 [========>.....................] - ETA: 2s - loss: 1.8300 - compute_accuracy: 0.6873
 95/300 [========>.....................] - ETA: 2s - loss: 1.8281 - compute_accuracy: 0.6890
 99/300 [========>.....................] - ETA: 2s - loss: 1.8271 - compute_accuracy: 0.6898
105/300 [=========>....................] - ETA: 2s - loss: 1.8251 - compute_accuracy: 0.6914
111/300 [==========>...................] - ETA: 2s - loss: 1.8228 - compute_accuracy: 0.6928
117/300 [==========>...................] - ETA: 1s - loss: 1.8204 - compute_accuracy: 0.6949
123/300 [===========>..................] - ETA: 1s - loss: 1.8183 - compute_accuracy: 0.6966
128/300 [===========>..................] - ETA: 1s - loss: 1.8161 - compute_accuracy: 0.6985
133/300 [============>.................] - ETA: 1s - loss: 1.8146 - compute_accuracy: 0.6994
138/300 [============>.................] - ETA: 1s - loss: 1.8119 - compute_accuracy: 0.7017
143/300 [=============>................] - ETA: 1s - loss: 1.8111 - compute_accuracy: 0.7020
149/300 [=============>................] - ETA: 1s - loss: 1.8088 - compute_accuracy: 0.7039
154/300 [==============>...............] - ETA: 1s - loss: 1.8079 - compute_accuracy: 0.7043
158/300 [==============>...............] - ETA: 1s - loss: 1.8055 - compute_accuracy: 0.7066
163/300 [===============>..............] - ETA: 1s - loss: 1.8045 - compute_accuracy: 0.7075
167/300 [===============>..............] - ETA: 1s - loss: 1.8027 - compute_accuracy: 0.7096
172/300 [================>.............] - ETA: 1s - loss: 1.8008 - compute_accuracy: 0.7119
177/300 [================>.............] - ETA: 1s - loss: 1.7992 - compute_accuracy: 0.7136
182/300 [=================>............] - ETA: 1s - loss: 1.7979 - compute_accuracy: 0.7153
187/300 [=================>............] - ETA: 1s - loss: 1.7962 - compute_accuracy: 0.7169
192/300 [==================>...........] - ETA: 1s - loss: 1.7942 - compute_accuracy: 0.7189
197/300 [==================>...........] - ETA: 1s - loss: 1.7922 - compute_accuracy: 0.7208
202/300 [===================>..........] - ETA: 1s - loss: 1.7908 - compute_accuracy: 0.7222
207/300 [===================>..........] - ETA: 0s - loss: 1.7890 - compute_accuracy: 0.7237
213/300 [====================>.........] - ETA: 0s - loss: 1.7870 - compute_accuracy: 0.7255
218/300 [====================>.........] - ETA: 0s - loss: 1.7854 - compute_accuracy: 0.7269
224/300 [=====================>........] - ETA: 0s - loss: 1.7835 - compute_accuracy: 0.7287
230/300 [======================>.......] - ETA: 0s - loss: 1.7814 - compute_accuracy: 0.7305
236/300 [======================>.......] - ETA: 0s - loss: 1.7790 - compute_accuracy: 0.7329
240/300 [=======================>......] - ETA: 0s - loss: 1.7776 - compute_accuracy: 0.7343
245/300 [=======================>......] - ETA: 0s - loss: 1.7759 - compute_accuracy: 0.7357
249/300 [=======================>......] - ETA: 0s - loss: 1.7749 - compute_accuracy: 0.7365
254/300 [========================>.....] - ETA: 0s - loss: 1.7731 - compute_accuracy: 0.7381
259/300 [========================>.....] - ETA: 0s - loss: 1.7715 - compute_accuracy: 0.7394
264/300 [=========================>....] - ETA: 0s - loss: 1.7704 - compute_accuracy: 0.7401
269/300 [=========================>....] - ETA: 0s - loss: 1.7688 - compute_accuracy: 0.7416
273/300 [==========================>...] - ETA: 0s - loss: 1.7676 - compute_accuracy: 0.7426
278/300 [==========================>...] - ETA: 0s - loss: 1.7663 - compute_accuracy: 0.7438
283/300 [===========================>..] - ETA: 0s - loss: 1.7650 - compute_accuracy: 0.7449
289/300 [===========================>..] - ETA: 0s - loss: 1.7631 - compute_accuracy: 0.7468
293/300 [============================>.] - ETA: 0s - loss: 1.7621 - compute_accuracy: 0.7476
298/300 [============================>.] - ETA: 0s - loss: 1.7607 - compute_accuracy: 0.7487
300/300 [==============================] - 3s 11ms/step - loss: 1.7603 - compute_accuracy: 0.7491
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
19/50 [==========>...................] - ETA: 0s
27/50 [===============>..............] - ETA: 0s
38/50 [=====================>........] - ETA: 0s
48/50 [===========================>..] - ETA: 0s
50/50 [==============================] - 0s 6ms/step
test dataset loss: 1.678678 acc: 0.812900

Train time for epoch #3 (0 total steps): 3.654161
Epoch 1/1
  1/300 [..............................] - ETA: 6s - loss: 1.7017 - compute_accuracy: 0.7900
  6/300 [..............................] - ETA: 3s - loss: 1.6759 - compute_accuracy: 0.8167
 11/300 [>.............................] - ETA: 3s - loss: 1.6847 - compute_accuracy: 0.8064
 17/300 [>.............................] - ETA: 3s - loss: 1.6871 - compute_accuracy: 0.8056
 22/300 [=>............................] - ETA: 3s - loss: 1.6872 - compute_accuracy: 0.8061
 28/300 [=>............................] - ETA: 2s - loss: 1.6866 - compute_accuracy: 0.8057
 33/300 [==>...........................] - ETA: 2s - loss: 1.6869 - compute_accuracy: 0.8052
 37/300 [==>...........................] - ETA: 2s - loss: 1.6865 - compute_accuracy: 0.8055
 42/300 [===>..........................] - ETA: 2s - loss: 1.6835 - compute_accuracy: 0.8086
 47/300 [===>..........................] - ETA: 2s - loss: 1.6834 - compute_accuracy: 0.8080
 52/300 [====>.........................] - ETA: 2s - loss: 1.6802 - compute_accuracy: 0.8113
 56/300 [====>.........................] - ETA: 2s - loss: 1.6800 - compute_accuracy: 0.8108
 61/300 [=====>........................] - ETA: 2s - loss: 1.6800 - compute_accuracy: 0.8105
 66/300 [=====>........................] - ETA: 2s - loss: 1.6797 - compute_accuracy: 0.8105
 71/300 [======>.......................] - ETA: 2s - loss: 1.6784 - compute_accuracy: 0.8118
 77/300 [======>.......................] - ETA: 2s - loss: 1.6778 - compute_accuracy: 0.8119
 82/300 [=======>......................] - ETA: 2s - loss: 1.6773 - compute_accuracy: 0.8124
 87/300 [=======>......................] - ETA: 2s - loss: 1.6757 - compute_accuracy: 0.8139
 93/300 [========>.....................] - ETA: 2s - loss: 1.6741 - compute_accuracy: 0.8152
 99/300 [========>.....................] - ETA: 2s - loss: 1.6739 - compute_accuracy: 0.8151
105/300 [=========>....................] - ETA: 2s - loss: 1.6733 - compute_accuracy: 0.8158
111/300 [==========>...................] - ETA: 2s - loss: 1.6735 - compute_accuracy: 0.8153
115/300 [==========>...................] - ETA: 1s - loss: 1.6740 - compute_accuracy: 0.8147
120/300 [===========>..................] - ETA: 1s - loss: 1.6735 - compute_accuracy: 0.8149
125/300 [===========>..................] - ETA: 1s - loss: 1.6725 - compute_accuracy: 0.8158
131/300 [============>.................] - ETA: 1s - loss: 1.6720 - compute_accuracy: 0.8161
135/300 [============>.................] - ETA: 1s - loss: 1.6710 - compute_accuracy: 0.8170
140/300 [=============>................] - ETA: 1s - loss: 1.6707 - compute_accuracy: 0.8171
145/300 [=============>................] - ETA: 1s - loss: 1.6702 - compute_accuracy: 0.8173
150/300 [==============>...............] - ETA: 1s - loss: 1.6700 - compute_accuracy: 0.8174
155/300 [==============>...............] - ETA: 1s - loss: 1.6699 - compute_accuracy: 0.8173
161/300 [===============>..............] - ETA: 1s - loss: 1.6696 - compute_accuracy: 0.8178
167/300 [===============>..............] - ETA: 1s - loss: 1.6696 - compute_accuracy: 0.8175
173/300 [================>.............] - ETA: 1s - loss: 1.6698 - compute_accuracy: 0.8171
179/300 [================>.............] - ETA: 1s - loss: 1.6694 - compute_accuracy: 0.8174
184/300 [=================>............] - ETA: 1s - loss: 1.6693 - compute_accuracy: 0.8175
190/300 [==================>...........] - ETA: 1s - loss: 1.6687 - compute_accuracy: 0.8179
196/300 [==================>...........] - ETA: 1s - loss: 1.6682 - compute_accuracy: 0.8185
201/300 [===================>..........] - ETA: 1s - loss: 1.6677 - compute_accuracy: 0.8188
206/300 [===================>..........] - ETA: 0s - loss: 1.6677 - compute_accuracy: 0.8187
211/300 [====================>.........] - ETA: 0s - loss: 1.6672 - compute_accuracy: 0.8190
216/300 [====================>.........] - ETA: 0s - loss: 1.6671 - compute_accuracy: 0.8189
222/300 [=====================>........] - ETA: 0s - loss: 1.6669 - compute_accuracy: 0.8190
228/300 [=====================>........] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8192
233/300 [======================>.......] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8192
238/300 [======================>.......] - ETA: 0s - loss: 1.6662 - compute_accuracy: 0.8194
244/300 [=======================>......] - ETA: 0s - loss: 1.6661 - compute_accuracy: 0.8194
250/300 [========================>.....] - ETA: 0s - loss: 1.6659 - compute_accuracy: 0.8194
256/300 [========================>.....] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8186
261/300 [=========================>....] - ETA: 0s - loss: 1.6662 - compute_accuracy: 0.8186
267/300 [=========================>....] - ETA: 0s - loss: 1.6657 - compute_accuracy: 0.8190
271/300 [==========================>...] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8193
276/300 [==========================>...] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8192
281/300 [===========================>..] - ETA: 0s - loss: 1.6652 - compute_accuracy: 0.8193
285/300 [===========================>..] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8191
290/300 [============================>.] - ETA: 0s - loss: 1.6656 - compute_accuracy: 0.8188
293/300 [============================>.] - ETA: 0s - loss: 1.6654 - compute_accuracy: 0.8189
297/300 [============================>.] - ETA: 0s - loss: 1.6649 - compute_accuracy: 0.8193
300/300 [==============================] - 3s 11ms/step - loss: 1.6646 - compute_accuracy: 0.8195
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
21/50 [===========>..................] - ETA: 0s
31/50 [=================>............] - ETA: 0s
41/50 [=======================>......] - ETA: 0s
50/50 [==============================] - 0s 5ms/step
test dataset loss: 1.645696 acc: 0.831400

Train time for epoch #4 (0 total steps): 3.597641

Process finished with exit code 0

           

繼續閱讀