
Loading data with preprocessing.image_dataset_from_directory and feeding it into VGG16

import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import os
import PIL
import pathlib
import numpy as np
data_dir='data-class'
data_dir=pathlib.Path(data_dir)
image_count=len(list(data_dir.glob('*/*')))
print(image_count)
# -----------------------------------------------------
for item in data_dir.iterdir():
    print(item.name)# list the subdirectories (one per class) under data_dir

# roses=list(data_dir.glob('roses/*'))
# print(roses[0])
# img=PIL.Image.open(str(roses[0]))
# plt.imshow(img)
# plt.show() # view the first image in the 'roses' subfolder
# -----------------------------------------------------

batch_size=32
img_height=224
img_width=224
train_ds=keras.preprocessing.image_dataset_from_directory(
    data_dir,validation_split=0.2,subset='training',
    seed=100,image_size=(img_height,img_width),batch_size=batch_size)
val_ds=keras.preprocessing.image_dataset_from_directory(
    data_dir,validation_split=0.2,subset='validation',
    seed=100,image_size=(img_height,img_width),batch_size=batch_size)

class_names=train_ds.class_names
print(class_names)
# for images,labels in train_ds.take(1):
#     for i in range(9):
#         ax=plt.subplot(3,3,i+1)
#         plt.imshow(images[i].numpy().astype('uint8'))
#         plt.title(class_names[labels[i]])
#         plt.colorbar()
#         plt.show()
for image_batch,labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
AUTOTUNE=tf.data.experimental.AUTOTUNE
train_ds=train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds=val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Normalize the data (demonstration only: the model below contains its own Rescaling layer,
# so the raw train_ds/val_ds are what actually get passed to model.fit)
normalization_layer=keras.layers.experimental.preprocessing.Rescaling(1./255)
normalized_ds=train_ds.map(lambda x,y:(normalization_layer(x),y))
image_batch,labels_batch=next(iter(normalized_ds))
first_image=image_batch[0]
print(np.min(first_image),np.max(first_image))# pixel values are now in [0, 1]

num_classes=2

model=keras.Sequential([
keras.layers.experimental.preprocessing.Rescaling(1./255,input_shape=(img_height,img_width,3)),
keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1'),
keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2'),
keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='pool1'),
# block2
keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1'),
keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2'),
keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='pool2'),
# block3
keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1'),
keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2'),
keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3'),
keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='pool3'),
# block4
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1'),
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2'),
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3'),
keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='pool4'),
# block5
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1'),
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2'),
keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3'),
keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='pool5'),

#dense
keras.layers.Flatten(name='flatten'),
keras.layers.Dense(4096, activation='relu', name='fc6'),
keras.layers.Dense(4096, activation='relu', name='fc7'),
keras.layers.Dropout(0.2),
keras.layers.Dense(num_classes, activation='softmax', name='fc8')
])

# fc8 already applies softmax, so the loss receives probabilities rather than logits (from_logits=False)
model.compile(optimizer='rmsprop',loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
# model.compile(optimizer='adam',loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
#               metrics=['accuracy'])

model.summary()

epochs=10
history=model.fit(train_ds,validation_data=val_ds,epochs=epochs)
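
The history object returned by model.fit records the per-epoch metrics, so the run can be visualized with the plt import at the top. A minimal sketch (the 'accuracy'/'val_accuracy' keys follow from the metrics=['accuracy'] setting above):

# Optional: plot the training and validation curves stored in history.history
acc=history.history['accuracy']
val_acc=history.history['val_accuracy']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs_range=range(epochs)
plt.figure(figsize=(8,4))
plt.subplot(1,2,1)
plt.plot(epochs_range,acc,label='Training Accuracy')
plt.plot(epochs_range,val_acc,label='Validation Accuracy')
plt.legend(loc='lower right')
plt.subplot(1,2,2)
plt.plot(epochs_range,loss,label='Training Loss')
plt.plot(epochs_range,val_loss,label='Validation Loss')
plt.legend(loc='upper right')
plt.show()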
           

Reference: Image classification | TensorFlow Core (google.cn)

For the difference between categorical_accuracy and sparse_categorical_accuracy, see this blog post: https://blog.csdn.net/qq_20011607/article/details/89213908
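
In short, sparse_categorical_accuracy expects integer class labels (which is what image_dataset_from_directory produces above), while categorical_accuracy expects one-hot encoded labels. A small toy example with made-up predictions and labels, just to show the two formats:

# Hypothetical values: the same predictions scored against the two label formats
y_pred=np.array([[0.1,0.9],[0.8,0.2]])   # predicted probabilities for 2 classes
y_sparse=np.array([1,0])                 # integer labels  -> sparse_categorical_accuracy
y_onehot=np.array([[0.,1.],[1.,0.]])     # one-hot labels  -> categorical_accuracy
print(keras.metrics.sparse_categorical_accuracy(y_sparse,y_pred).numpy())  # [1. 1.]
print(keras.metrics.categorical_accuracy(y_onehot,y_pred).numpy())         # [1. 1.]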
