Depth is generally considered crucial to a neural network's performance, but the deeper the network, the harder it is to train. Highway Network was proposed to make extremely deep networks trainable. It effectively modifies each layer's transformation so that a certain fraction of the original input is preserved: part of the previous layer's output can bypass the matrix multiplication and nonlinearity and flow straight to the next layer. The network has to learn how to control this information flow, i.e. what fraction of the original signal to carry through.
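As a rough sketch of that gating idea (my own illustration, not code from this article; the names highway_layer, 'transform' and 'gate' are made up), a single highway layer written with the slim API used later could look like this, where the gate t decides how much of the transformed signal h versus the raw input x is carried forward:

import tensorflow as tf
slim = tf.contrib.slim

# One highway layer: y = t * h + (1 - t) * x, where t is a learned sigmoid gate.
def highway_layer(x, scope=None):
    with tf.variable_scope(scope, 'highway', [x]):
        size = x.get_shape()[-1].value
        h = slim.fully_connected(x, size, activation_fn=tf.nn.relu, scope='transform')
        # The gate bias starts negative, so initially the layer mostly carries x through.
        t = slim.fully_connected(x, size, activation_fn=tf.nn.sigmoid,
                                 biases_initializer=tf.constant_initializer(-1.0),
                                 scope='gate')
        return t * h + (1.0 - t) * x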
ResNet is very similar to Highway Network in that it lets the original input be passed directly to later layers.
As the network is made deeper and deeper, several problems show up:

- The degradation problem: accuracy rises at first, saturates, and then drops if the depth keeps increasing.
- The vanishing-gradient problem may also appear.
Suppose the input to some stretch of the network is x and the desired underlying mapping is H(x). If the input is passed straight through to the output as an initial estimate, what is left to learn is the residual F(x) = H(x) - x; this is ResNet's residual learning, and the block's output becomes F(x) + x.
ResNet contains many bypass branches that connect the input directly to later layers, so those layers only need to learn the residual. This structure is called a shortcut or skip connection.
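As a rough illustration (my own sketch, not the bottleneck unit implemented below; it assumes the tf/slim imports from the sketch above), a plain two-convolution residual unit only has to fit F(x):

# Basic residual unit: the convolutions learn F(x) = H(x) - x and the unit returns x + F(x).
def residual_unit(x, scope=None):
    with tf.variable_scope(scope, 'residual_unit', [x]):
        depth = x.get_shape()[-1].value
        f = slim.conv2d(x, depth, [3, 3], scope='conv1')
        f = slim.conv2d(f, depth, [3, 3], activation_fn=None, scope='conv2')
        return tf.nn.relu(x + f)  # H(x) approximated as x + F(x)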
In Figure 1, preact is the input. The left branch is a shortcut path that carries the input straight to the output, while the right branch is the stack of convolutional layers. The figure shows the input with shape 32x56x56x64, the shortcut with shape 32x56x56x256, and the convolutional branch also producing 32x56x56x256, so the two shapes match. Figure 2 expands the shortcut node of Figure 1: a 1x1 convolution is applied there to change the number of output channels so that the final shapes agree. In Figure 3 below the shapes already match, so no 1x1 convolution is performed.
Figure 4 is just an ordinary convolutional layer, with the tensor shapes annotated on the connecting edges. Expanding one of the unit_x nodes inside the red box of Figure 5 yields exactly the structure shown in Figure 1.
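To make the shape bookkeeping concrete, here is a toy check (my own example, reusing the tf/slim imports from the sketches above) that a 1x1 convolution on the shortcut only changes the channel count, matching the figure's 64 -> 256 channels:

# Shortcut's 1x1 convolution: 64 channels in, 256 out, spatial size untouched.
x = tf.placeholder(tf.float32, [32, 56, 56, 64])
s = slim.conv2d(x, 256, [1, 1], stride=1,
                normalizer_fn=None, activation_fn=None, scope='shortcut_demo')
print(s.get_shape())  # (32, 56, 56, 256)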
Below is an implementation in TensorFlow, using the contrib slim module:
import collections
import tensorflow as tf
slim = tf.contrib.slim

# Data structure for a group of ResNet units: a scope name, the unit function that
# builds one residual unit, and a list of (depth, depth_bottleneck, stride) tuples.
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    'A named tuple describing a ResNet block'
# Downsampling: a no-op when factor == 1, otherwise 1x1 max pooling with the given stride.
def subsample(inputs, factor, scope=None):
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
# Convolution with 'SAME'-style output size even when stride > 1:
# pad the input explicitly, then convolve with 'VALID' padding.
def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           padding='SAME', scope=scope)
    else:
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                                 [pad_beg, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                           padding='VALID', scope=scope)
# Stack the Blocks: for every block, build each unit with its
# (depth, depth_bottleneck, stride) arguments and collect the block's output.
@slim.add_arg_scope
def stack_arg_dense(net, blocks, outputs_collections=None):
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    net = block.unit_fn(net,
                                        depth=unit_depth,
                                        depth_bottleneck=unit_depth_bottleneck,
                                        stride=unit_stride)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net
# Shared arg_scope for ResNet: L2 weight decay, batch normalization after every
# convolution, ReLU activations, and 'SAME' padding for max pooling.
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with slim.arg_scope([slim.conv2d],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
# Core bottleneck residual unit (pre-activation ResNet V2): 1x1 reduce, 3x3, 1x1 expand.
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, outputs_collections=None, scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Pre-activation: batch norm + ReLU applied before the convolutions.
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            # Channel counts already match; only subsample the shortcut if stride > 1.
            shortcut = subsample(inputs, stride, 'shortcut')
        else:
            # Use a 1x1 convolution to bring the shortcut to the output channel count.
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                                   normalizer_fn=None, activation_fn=None, scope='shortcut')
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv2d_same(residual, depth_bottleneck, 3, stride, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                               normalizer_fn=None, activation_fn=None, scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
# Main function that assembles a ResNet V2 network from a list of Blocks.
def resnet_v2(inputs, blocks, num_classes=None, global_pool=True,
              include_root_block=True, reuse=None, scope=None):
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_point_collections = sc.original_name_scope + 'end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, stack_arg_dense],
                            outputs_collections=end_point_collections):
            net = inputs
            if include_root_block:  # optionally add the leading 7x7 convolution and max pooling
                with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                    net = conv2d_same(net, 64, 7, stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
            net = stack_arg_dense(net, blocks)
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            if global_pool:  # optionally apply global average pooling at the end
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
            if num_classes is not None:
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_point_collections)
            if num_classes is not None:
                end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points
# 152-layer ResNet V2.
def resnet_v2_152(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_152'):
    blocks = [Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
              Block('block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
              Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
              Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)
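A quick usage sketch (batch size, image size and class count are arbitrary choices of mine, not from the text): build resnet_v2_152 on a batch of random images and run one forward pass.

# Forward a batch of random 224x224 images through the 152-layer network.
batch_size, height, width = 32, 224, 224
images = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_152(images, num_classes=1000, global_pool=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    logits = sess.run(net)
    print(logits.shape)  # (32, 1, 1, 1000)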