Implementing simple gender recognition with Keras (a binary classification problem)
Step 1: Prepare the required libraries (a quick version check is sketched after this list)
- tensorflow 1.4.0
- h5py 2.7.0
- hdf5 1.8.15.1
- Keras 2.0.8
- opencv-python 3.3.0
- numpy 1.13.3+mkl
- the required face detection modules: mtcnn and opencv
  https://pan.baidu.com/s/1rhP7mcnAtiojhk8eiLroEw
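A quick way to confirm that the environment matches the versions listed above is to import each package and print the version it reports. This is only a sketch and assumes the packages are importable under their usual names:

import tensorflow as tf
import keras
import cv2
import numpy as np
import h5py

# Print the versions actually loaded and compare against the list above.
print("tensorflow:", tf.__version__)
print("Keras:", keras.__version__)
print("opencv-python:", cv2.__version__)
print("numpy:", np.__version__)
print("h5py:", h5py.__version__)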
Step 2: Prepare the dataset
Put the images into separate folders according to gender, one folder per class.

Dataset:
https://pan.baidu.com/s/1_f36Gw4PWztUXZWH_jLWcw

import shutil

# Read the fold file and copy each image into the folder for its gender label
dirroot = "D:\\Users\\a\\Pictures\\adience"
f = open(dirroot + "\\fold_frontal_3_data.txt", "r")
i = 0

for line in f.readlines():
    line = line.split()
    dir = line[0]

    imgName = "landmark_aligned_face." + line[2] + '.' + line[1]
    if i > 0:
        if line[5] == "f":
            print("female")
            shutil.copy(dirroot + '\\faces\\' + dir + '\\' + imgName,
                        "D:\\pycode\\learn\\data\\validation\\female\\" + imgName)
            # copy the image into the female folder
        elif line[5] == "m":
            print("male")
            shutil.copy(dirroot + '\\faces\\' + dir + '\\' + imgName,
                        "D:\\pycode\\learn\\data\\validation\\male\\" + imgName)
            # copy the image into the male folder
        else:
            print("N")
            # gender not labelled
    i += 1
f.close()
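flow_from_directory, used in the next step, infers the class labels from the names of the subfolders, so the training and validation roots each need one subfolder per gender. A minimal sketch for creating that layout, assuming the data root and the female/male class names used in the script above:

import os

# Assumed layout: <root>\<split>\<class>\ ; the folder names here are illustrative.
base = "D:\\pycode\\learn\\data"
for split in ("train", "validation"):
    for label in ("female", "male"):
        os.makedirs(os.path.join(base, split, label), exist_ok=True)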
ImageDataGenerator is used to rescale the images (and randomly flip them horizontally for augmentation), and flow_from_directory builds generators that yield the images together with labels derived automatically from the folder names.
class Dataset(object):

    def __init__(self):
        self.train = None
        self.valid = None

    def read(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE):
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='binary')

        validation_generator = test_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='binary')

        self.train = train_generator
        self.valid = validation_generator
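With class_mode='binary', each class gets its label from the alphabetical order of the folder names, so with female/male subfolders this would normally be female = 0 and male = 1, which is what the prediction code later assumes. A quick check, assuming the Dataset class above and the path constants defined in the full script at the end:

dataset = Dataset()
dataset.read()
print(dataset.train.class_indices)  # e.g. {'female': 0, 'male': 1}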
Step 3: The network
class Model(object):

    def __init__(self):
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.85))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))

    def train(self, dataset, batch_size=batch_size, nb_epoch=epochs):
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.fit_generator(dataset.train,
                                 steps_per_epoch=nb_train_samples // batch_size,
                                 epochs=nb_epoch,
                                 validation_data=dataset.valid,
                                 validation_steps=nb_validation_samples // batch_size)

    def save(self, file_path=FILE_PATH):
        print('Model Saved.')
        self.model.save_weights(file_path)

    def load(self, file_path=FILE_PATH):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # Predict the class of a single face image (H x W x 3 array);
        # cv2 is imported at the top of the full script below.
        img = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
        img = img.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        img = img.astype('float32')
        img /= 255
        # normalise to [0, 1]
        result = self.model.predict(img)
        print(result)
        # probability
        result = self.model.predict_classes(img)
        print(result)
        # 0 / 1
        return result[0]

    def evaluate(self, dataset):
        # accuracy on the validation samples
        score = self.model.evaluate_generator(dataset.valid, steps=2)
        print("Validation %s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
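To sanity-check the architecture (the stacked conv/pool blocks ending in a single sigmoid unit), Keras can print a layer-by-layer summary. A minimal sketch, assuming the Model class above:

m = Model()
m.model.summary()  # layer output shapes and parameter counts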
Step 4: Main program
if __name__ == '__main__':
    dataset = Dataset()
    dataset.read()

    model = Model()
    model.load()
    model.train(dataset)
    model.evaluate(dataset)
    model.save()
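Note that model.load() will fail if the weight file (FILE_PATH, here Gender_new.h5) does not exist yet, so on the very first run it may help to guard the call. A sketch of that variant, assuming the names defined above:

import os

if __name__ == '__main__':
    dataset = Dataset()
    dataset.read()

    model = Model()
    if os.path.exists(FILE_PATH):
        model.load()  # resume from previously saved weights
    model.train(dataset)
    model.evaluate(dataset)
    model.save()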
Step 5: Recognition program
OpenCV detection version
#!/usr/bin/env python
"""
Real-time gender detection on frames captured from the webcam.
"""
import numpy as np
import cv2
from GenderTrain import Model


def detect(img, cascade):
    """
    Detect faces in an image.
    :param img: frame to examine
    :param cascade: face detector (Haar cascade)
    :return: face bounding boxes as (x1, y1, x2, y2)
    """
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]
    # convert (x, y, w, h) to (x1, y1, x2, y2)
    return rects


def draw_rects(img, rects, color):
    """
    Mark each detected face and its predicted gender on the image.
    :param img: frame to draw on
    :param rects: face boxes as (x1, y1, x2, y2)
    :param color: rectangle colour
    """
    for x1, y1, x2, y2 in rects:
        face = img[y1:y2, x1:x2]
        face = cv2.resize(face, (224, 224))
        if gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
        cv2.putText(img, text, (x1, y2), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), lineType=cv2.LINE_AA)


if __name__ == '__main__':
    haar_cascade_path = "D:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml"

    cascade = cv2.CascadeClassifier(haar_cascade_path)
    cam = cv2.VideoCapture(0)
    # open the webcam stream
    gender = Model()
    gender.load()
    # load the trained gender model
    while True:
        ret, img = cam.read()
        # grab a frame
        rects = detect(img, cascade)
        print(rects)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        cv2.imshow('Gender', vis)
        if cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
MTCNN detection version
"""
從攝像頭中擷取圖像實時監測
"""
import PIL
import numpy as np
import detect_face
import tensorflow as tf
import cv2
from GenderTrain import Model
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess,
'E:\\pycode\\real-time-deep-face-recognition-master\\20170512-110547')
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
margin = 44
frame_interval = 3
batch_size = 1000
image_size = 182
input_image_size = 160
def draw_rects(img, rects, color):
"""
根據圖像标記人臉區域與性别
:param img:
:param rects:
:param color:
:return:
"""
for x, y, w, h in rects:
face = img[x:x+w,y:y+h]
face = cv2.resize(face,(224,224))
if gender.predict(face)==1:
text = "Male"
else:
text = "Female"
cv2.rectangle(img, (x, y), (w, h), color, 2)
cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), lineType=cv2.LINE_AA)
if __name__ == '__main__':
cam = cv2.VideoCapture(0)
# 擷取攝像頭視訊
gender = Model()
gender.load()
# 加載性别模型
while True:
ret, img = cam.read()
# 讀取幀圖像
bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
# 讀取幀圖像
for face_position in bounding_boxes:
face_position = face_position.astype(int)
print(face_position[0:4])
rects = [[face_position[0], face_position[1], face_position[2], face_position[3]]]
vis = img.copy()
draw_rects(vis, rects, (255, 255, 255))
cv2.imshow('Gender', vis)
if cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
Full version
import os
import random
import cv2
import numpy as np
from tensorflow.contrib.keras.api.keras.preprocessing.image import ImageDataGenerator, img_to_array
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D
from tensorflow.contrib.keras.api.keras.optimizers import SGD

IMAGE_SIZE = 182
# training image size
epochs = 150  # originally 50
# number of training epochs
batch_size = 32
# batch size
nb_train_samples = 512 * 2
# total number of training samples
nb_validation_samples = 128 * 2
# total number of validation samples
train_data_dir = 'D:\\code\\learn\\data_sex\\train_data\\'
validation_data_dir = 'D:\\data_sex\\test_data\\'
# paths to the sample image folders
FILE_PATH = 'Gender_new.h5'
# path for saving the model weights


class Dataset(object):

    def __init__(self):
        self.train = None
        self.valid = None

    def read(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE):
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='binary')

        validation_generator = test_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_rows, img_cols),
            batch_size=batch_size,
            class_mode='binary')

        self.train = train_generator
        self.valid = validation_generator


class Model(object):

    def __init__(self):
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))

    def train(self, dataset, batch_size=batch_size, nb_epoch=epochs):
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.fit_generator(dataset.train,
                                 steps_per_epoch=nb_train_samples // batch_size,
                                 epochs=nb_epoch,
                                 validation_data=dataset.valid,
                                 validation_steps=nb_validation_samples // batch_size)

    def save(self, file_path=FILE_PATH):
        print('Model Saved.')
        self.model.save_weights(file_path)

    def load(self, file_path=FILE_PATH):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # predict the class of a single face image (H x W x 3 array)
        img = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
        img = img.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        img = img.astype('float32')
        img /= 255
        # normalise to [0, 1]
        result = self.model.predict(img)
        print(result)
        # probability
        result = self.model.predict_classes(img)
        print(result)
        # 0 / 1
        return result[0]

    def evaluate(self, dataset):
        # accuracy on the validation samples
        score = self.model.evaluate_generator(dataset.valid, steps=2)
        print("Validation %s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))


if __name__ == '__main__':
    dataset = Dataset()
    dataset.read()

    model = Model()
    model.load()
    model.train(dataset)
    model.evaluate(dataset)
    model.save()