1. Multi-output model
Build the network with the Keras functional API:
# Input layer
inputs = tf.keras.layers.Input(shape=(64, 64, 3))
# Intermediate layers (convolutional, fully-connected, etc.); flatten before the Dense heads
x = tf.keras.layers.Flatten()(inputs)
x = tf.keras.layers.Dense(256, activation=tf.nn.relu)(x)
# Multiple outputs: define two output layers and identify each with a name
fc_a = tf.keras.layers.Dense(name='fc_a', units=CLASS_NUM, activation=tf.nn.softmax)(x)
fc_b = tf.keras.layers.Dense(name='fc_b', units=CLASS_NUM, activation=tf.nn.softmax)(x)
# Single input, multiple outputs
model = tf.keras.Model(inputs=inputs, outputs=[fc_a, fc_b])
# Loss definitions; the dict keys must match the output layer names
losses = {'fc_a': 'categorical_crossentropy',
          'fc_b': 'categorical_crossentropy'}
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=losses,
              metrics=['accuracy'])
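Because the outputs are named, the training targets can also be passed as a dict keyed by those names, and the two losses can be weighted against each other. A minimal usage sketch, assuming hypothetical in-memory arrays train_x, labels_a and labels_b holding one-hot labels:
# Optionally weight the two losses (keys must match the output layer names)
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=losses,
              loss_weights={'fc_a': 1.0, 'fc_b': 0.5},
              metrics=['accuracy'])
# Targets passed as a dict keyed by output name; a list in output order also works
model.fit(train_x,
          {'fc_a': labels_a, 'fc_b': labels_b},
          batch_size=32,
          epochs=10)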
2. Custom loss functions
def loss_a(y_true, y_pred):
    return tf.keras.losses.categorical_crossentropy(y_true, y_pred)
def loss_b(y_true, y_pred):
    return tf.keras.losses.mean_squared_error(y_true, y_pred)
losses = {'fc_a': loss_a,
          'fc_b': loss_b}
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=losses,
              metrics=['accuracy'])
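A custom loss only has to map (y_true, y_pred) to a per-sample loss tensor, so arbitrary logic can go inside it. A hedged sketch that mixes cross-entropy with an MSE term for the fc_a head; the 0.8/0.2 weights are purely illustrative, not part of the original:
def loss_a_weighted(y_true, y_pred):
    # Both terms are per-sample tensors of shape (batch,), so they can be combined directly
    ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
    mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
    return 0.8 * ce + 0.2 * mse
losses = {'fc_a': loss_a_weighted,
          'fc_b': loss_b}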
3. Batch training
# The labels yielded by data_generator must be a list whose length matches the number of model outputs
def data_generator(sample_num, batch_size):
    while True:
        max_num = sample_num - (sample_num % batch_size)
        for i in range(0, max_num, batch_size):
            ...
            yield (batch_x, [batch_a, batch_b])
model.fit_generator(generator=data_generator(sample_num, batch_size),
                    steps_per_epoch=sample_num // batch_size,
                    epochs=EPOCHES,
                    verbose=1)
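For completeness, a minimal sketch of what the elided generator body might look like, assuming the samples already live in hypothetical in-memory NumPy arrays images, labels_a and labels_b (one-hot encoded for both heads):
def data_generator(sample_num, batch_size):
    while True:
        max_num = sample_num - (sample_num % batch_size)
        for i in range(0, max_num, batch_size):
            # Slice one batch out of the in-memory arrays (names are placeholders)
            batch_x = images[i:i + batch_size]
            batch_a = labels_a[i:i + batch_size]
            batch_b = labels_b[i:i + batch_size]
            # One label array per output head, in the same order as outputs=[fc_a, fc_b]
            yield (batch_x, [batch_a, batch_b])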
4. Debugging
Inside a custom loss function the inputs are symbolic tensors, not concrete values. If you want to debug and inspect intermediate outputs of the loss computation, a plain print cannot show the values, because every TensorFlow op has to be run through a session before it produces a value. To debug, you can switch to eager execution mode:
import numpy as np
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
np.set_printoptions(threshold=np.nan)  # print every element of an array
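With eager execution enabled (it must be turned on at program start, before any graph is built), the loss function can be called directly on concrete tensors and its intermediate values printed. A small sketch with made-up example data:
import tensorflow as tf

def loss_a(y_true, y_pred):
    ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
    print(ce)  # under eager execution this prints the actual per-sample values
    return ce

# Call the loss directly on concrete values to check the computation
y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
print(loss_a(y_true, y_pred).numpy())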