This example uses MobileNetV2 (loaded via tf.keras.applications with ImageNet weights) for transfer learning to predict whether a person is wearing a face mask.
The full Jupyter notebook can be found here:
simple_learn/deep_learning/18_training_neural_network_with_keras/18. Training Neural Network with Keras.ipynb · master · zhuge20100104 / cpp_practice · GitLab
The code is as follows:
# Imports
# imutils is a helper library built on top of OpenCV; using it requires opencv-python to be installed
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import numpy as np
import os
# from imutils import paths
# Hyperparameters
# Only 2 epochs here because Jupyter uses a single CPU core by default and training is slow; this is just a quick demo. In practice, around 20 epochs gives noticeably better results.
INIT_LR = 1e-4
EPOCHES = 2
BS = 32
DIRECTORY = 'dataset'
# Load the images and labels; each label is initially the directory name and will be converted later
data = []
labels = []
# preprocess_input prepares an input image so it can be fed to MobileNetV2 for inference or training. Specifically it:
# - Normalizes pixel values from the [0, 255] range to [-1, 1], the input range the network was trained with; this helps training stability and convergence.
# Note: for MobileNetV2, preprocess_input does NOT swap the color channels. The RGB-to-BGR conversion (plus mean subtraction) belongs to 'caffe'-style preprocessing
# used by models such as VGG16 and ResNet50; MobileNetV2 uses 'tf'-style scaling only.
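# Illustrative sanity check (not in the original notebook): confirm the value
# range produced by preprocess_input, using a dummy uint8-style image.
_dummy = np.random.randint(0, 256, size=(224, 224, 3)).astype('float32')
_scaled = preprocess_input(_dummy)
print(_scaled.min(), _scaled.max())  # both values should lie within [-1, 1]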
for category in os.listdir(DIRECTORY):
    path_ = os.path.join(DIRECTORY, category)
    for img in os.listdir(path_):
        img_path = os.path.join(path_, img)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(category)
print(labels[:10])
# Preprocess the labels: convert them to one-hot encoding
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
data = np.array(data, dtype='float32')
labels = np.array(labels)
labels[:10]  # preview the first few one-hot encoded labels
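# Illustrative only: with exactly two classes, LabelBinarizer produces a single
# 0/1 column and to_categorical then expands it to two one-hot columns.
# The class names below are assumptions matching a typical mask dataset layout.
_toy = LabelBinarizer().fit_transform(['with_mask', 'without_mask', 'with_mask'])
print(_toy.ravel())          # e.g. [0 1 0]
print(to_categorical(_toy))  # e.g. [[1. 0.] [0. 1.] [1. 0.]]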
# Split into training and test sets
# About the stratify parameter:
# In train_test_split, stratify makes the class proportions in the training and test sets match those of the original dataset.
# This is particularly useful with imbalanced datasets: the model sees samples of every class during training and is evaluated fairly on every class at test time.
# Concretely, you pass an array of labels, and the split is performed so that each subset preserves the original class distribution (see the quick check right after the split below).
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=42)
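# Illustrative only: verify that stratification preserved the class balance.
print(trainY.mean(axis=0))  # per-class fraction in the training set
print(testY.mean(axis=0))   # per-class fraction in the test set; should be close to the line above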
# Image augmentation
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode='nearest'
)
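# Illustrative only: aug.flow yields batches of randomly augmented images with their
# labels; pulling one batch shows the shapes the generator produces.
_bx, _by = next(aug.flow(trainX, trainY, batch_size=BS))
print(_bx.shape, _by.shape)  # (32, 224, 224, 3) (32, 2) for a full batch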
# Load the MobileNetV2 network, ensuring the head FC layer sets are left off
# include_top=False removes the final global average pooling and the Dense classification head
baseModel = MobileNetV2(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
baseModel.summary()
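# Illustrative only: with a 224x224x3 input and include_top=False, the base model
# ends in a 7x7x1280 feature map, which is exactly what the new head consumes.
print(baseModel.output_shape)  # expected: (None, 7, 7, 1280)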
# Construct the head of the model that will be placed on top of the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name='flatten')(headModel)
headModel = Dense(128, activation='relu')(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation='softmax')(headModel)
# Place the head FC model on top of the baseModel,
# This will become the actual model we will train
model = Model(inputs=baseModel.input, outputs=headModel)
# Don't update the base model's weights: the network is large and fine-tuning it is compute-intensive
# Loop over all layers in the base model and freeze them so they will not be
# updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
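# Illustrative only: after freezing, only the new head should remain trainable.
print(len(model.trainable_weights), 'trainable weight tensors')          # just the two Dense layers of the head
print(len(model.non_trainable_weights), 'non-trainable weight tensors')  # the frozen MobileNetV2 base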
# Compile the model
print('[INFO] compiling model...')
# Note: the `decay` argument is only accepted by the older/legacy Keras optimizers;
# recent Keras versions drop it in favour of learning-rate schedules (see the commented-out alternative below).
opt = Adam(learning_rate=INIT_LR, decay=INIT_LR/EPOCHES)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
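# Illustrative alternative (an assumption, not part of the original notebook): on Keras
# versions where Adam no longer accepts `decay`, a learning-rate schedule gives a similar effect.
# from tensorflow.keras.optimizers.schedules import ExponentialDecay
# lr_schedule = ExponentialDecay(
#     initial_learning_rate=INIT_LR,
#     decay_steps=len(trainX) // BS,  # roughly one decay step per epoch
#     decay_rate=0.9
# )
# opt = Adam(learning_rate=lr_schedule)
# model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])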
# Fit the model, feeding it the augmented image data
# When the training and validation accuracy track each other closely, that is a good sign: the model is fitting without overfitting.
# A training accuracy far above the validation accuracy indicates overfitting (high variance); both staying low indicates underfitting (high bias).
# (Validation accuracy coming out slightly above training accuracy is not alarming here, since dropout and augmentation are applied only during training.)
# Train the head of the network
print('[INFO] training head...')
history = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHES
)
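# Illustrative only: a quick aggregate score on the held-out set before the per-class report below.
_eval_loss, _eval_acc = model.evaluate(testX, testY, batch_size=BS, verbose=0)
print('test loss: %.4f, test accuracy: %.4f' % (_eval_loss, _eval_acc))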
# make predictions on the testing set
print('[INFO] evaluating network...')
predIdxs = model.predict(testX, batch_size=BS)
# Print the classification_report
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
# Save the model to disk
print('[INFO] saving mask detector model...')
model.save('mask_detector.keras')
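# Illustrative only: reload the saved model and classify a single image.
# 'dataset/with_mask/example.png' is a hypothetical path; substitute any real image file.
from tensorflow.keras.models import load_model
reloaded = load_model('mask_detector.keras')
sample = load_img('dataset/with_mask/example.png', target_size=(224, 224))
sample = preprocess_input(img_to_array(sample))
pred = reloaded.predict(np.expand_dims(sample, axis=0))
print(lb.classes_[int(np.argmax(pred, axis=1)[0])])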
# Training vs. validation accuracy
# Training vs. validation loss
# When the two curves come together they are said to converge
# Plot the accuracy and loss curves for the training and validation sets
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
The program output is as follows: