Model building, data preprocessing, and model training
import keras
from keras import layers
import numpy as np
import cv2
import os
batch_size = 2
class_nums = 2  # number of segmentation classes
def U_netModel(num_classes, input_shape=(512, 512, 1)):
    inputs = layers.Input(shape=input_shape)
    # Encoder: each level applies two 3x3 convolutions, then halves the
    # spatial resolution with 2x2 max pooling while doubling the filters.
    conv1_1 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(inputs)
    conv1_2 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(conv1_1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1_2)
    conv2_1 = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(pool1)
    conv2_2 = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(conv2_1)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2_2)
    conv3_1 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(pool2)
    conv3_2 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(conv3_1)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3_2)
    conv4_1 = layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(pool3)
    conv4_2 = layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(conv4_1)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4_2)
    # Bottleneck
    conv5_1 = layers.Conv2D(filters=1024, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(pool4)
    conv5_2 = layers.Conv2D(filters=1024, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="relu")(conv5_1)
    # Decoder: each level upsamples 2x, concatenates the matching encoder
    # feature map (skip connection), then applies two 3x3 convolutions.
    deconv6_up = layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same",
                               kernel_initializer="he_normal",
                               activation="relu")(layers.UpSampling2D((2, 2))(conv5_2))
    merge6 = layers.concatenate([conv4_2, deconv6_up])
    deconv6_1 = layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(merge6)
    deconv6_2 = layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(deconv6_1)
    deconv7_up = layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                               kernel_initializer="he_normal",
                               activation="relu")(layers.UpSampling2D((2, 2))(deconv6_2))
    merge7 = layers.concatenate([conv3_2, deconv7_up])
    deconv7_1 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(merge7)
    deconv7_2 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(deconv7_1)
    deconv8_up = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same",
                               kernel_initializer="he_normal",
                               activation="relu")(layers.UpSampling2D((2, 2))(deconv7_2))
    merge8 = layers.concatenate([conv2_2, deconv8_up])
    deconv8_1 = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(merge8)
    deconv8_2 = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(deconv8_1)
    deconv9_up = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same",
                               kernel_initializer="he_normal",
                               activation="relu")(layers.UpSampling2D((2, 2))(deconv8_2))
    merge9 = layers.concatenate([conv1_2, deconv9_up])
    deconv9_1 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(merge9)
    deconv9_2 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same",
                              kernel_initializer="he_normal", activation="relu")(deconv9_1)
    # num_classes is set by how many classes the task has.
    # For binary classification use a sigmoid activation; labels are one-hot encoded.
    outputs = layers.Conv2D(filters=num_classes, kernel_size=(3, 3), padding="same",
                            kernel_initializer="he_normal", activation="sigmoid")(deconv9_2)
    model = keras.models.Model(inputs=inputs, outputs=outputs)
    return model
# Read the image file names from the dataset index file
def read_file_names():
    dataSetNames = []
    with open("dataset2/train.txt") as f:
        for line in f:
            dataSetNames.append(line.strip())
    return dataSetNames
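# Each line of train.txt is assumed to pair an input image with its label mask,
# separated by a semicolon (the file names below are hypothetical):
#   0001.jpg;0001.png
#   0002.jpg;0002.png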
# Split the dataset into training and validation subsets;
# ratio is the fraction held out for validation.
def split_data_set(dataSet, ratio):
    assert isinstance(dataSet, list)
    total_nums = len(dataSet)
    nums_train = int(total_nums * (1 - ratio))
    train_dataSet = dataSet[:nums_train]
    validation_dataSet = dataSet[nums_train:]
    return train_dataSet, validation_dataSet
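# For example, with 100 file names and ratio=0.2 this returns the first 80
# entries for training and the remaining 20 for validation.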
def generate_data_from_file(fileNames, batch_size, height, width, class_nums):
    total = len(fileNames)
    n = 0  # index of the next sample in fileNames
    while True:
        train_X = []
        train_Y = []
        for _ in range(batch_size):
            if n == 0:
                # Reshuffle in place at the start of every pass over the data
                np.random.shuffle(fileNames)
            name = fileNames[n]
            # Input image: read as grayscale, resize, scale to [0, 1]
            x_name = name.split(";")[0]
            x_img = cv2.imread(r"./dataset2/jpg" + '/' + x_name, 0)
            x_img = cv2.resize(x_img, (width, height))
            x_img = x_img.reshape(height, width, 1)
            x_img = x_img / 255.
            train_X.append(x_img)
            # Label image: read as BGR, resize with nearest-neighbour
            # interpolation so the class ids survive, then convert to RGB
            y_name = name.split(";")[1].strip()
            y_img = cv2.imread(r"./dataset2/png" + '/' + y_name, 1)
            y_img = cv2.resize(y_img, (width, height), interpolation=cv2.INTER_NEAREST)
            b, g, r = cv2.split(y_img)
            y_img = cv2.merge([r, g, b])
            # One-hot encode: channel c is 1 wherever the first channel equals class id c
            seg_labels = np.zeros((int(height), int(width), class_nums))
            for c in range(class_nums):
                seg_labels[:, :, c] = (y_img[:, :, 0] == c).astype(int)
            train_Y.append(seg_labels)
            n = (n + 1) % total
        yield np.array(train_X), np.array(train_Y)
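# With the settings used below, each yielded batch has shape
# (batch_size, 512, 512, 1) for the inputs and (batch_size, 512, 512, class_nums)
# for the one-hot labels, matching the model's input and output tensors.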
model = U_netModel(class_nums, input_shape=(512, 512, 1))
# Compile the model
model.compile(
    optimizer=keras.optimizers.RMSprop(lr=1e-4),
    loss="binary_crossentropy",
    metrics=["acc"]
)
# Read the dataset file names
dataSetNames = read_file_names()
# Split into training and validation sets (20% held out for validation)
train_dataSetNames, validation_dataSetNames = split_data_set(dataSetNames, 0.2)
model.fit_generator(
    generate_data_from_file(train_dataSetNames, batch_size, 512, 512, class_nums),
    steps_per_epoch=len(train_dataSetNames) // batch_size,
    epochs=1,
    validation_data=generate_data_from_file(validation_dataSetNames, batch_size, 512, 512, class_nums),
    validation_steps=len(validation_dataSetNames) // batch_size,
)
savedir = './savedir'
if not os.path.exists(savedir):
    os.mkdir(savedir)
model.save_weights(os.path.join(savedir, 'model01.h5'))
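# Minimal inference sketch (assumptions: the weights saved above and a grayscale
# test image at ./dataset2/jpg/test.jpg -- the file name is hypothetical):
#   model = U_netModel(class_nums, input_shape=(512, 512, 1))
#   model.load_weights('./savedir/model01.h5')
#   img = cv2.imread('./dataset2/jpg/test.jpg', 0)
#   img = cv2.resize(img, (512, 512)).reshape(1, 512, 512, 1) / 255.
#   pred = model.predict(img)  # shape (1, 512, 512, class_nums)
#   mask = np.argmax(pred[0], axis=-1).astype(np.uint8)  # per-pixel class ids
#   cv2.imwrite('pred_mask.png', mask * 255)  # scale 0/1 ids for visibility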