First, let's look at the model diagram for ResNet34.
[Figure: ResNet architectures at different depths]
[Figure: the detailed ResNet34 structure]
In the diagram above, blocks drawn in the same color use the same convolution kernels. Pay attention to the dashed shortcut connections: there the input and output dimensions differ, so they cannot be added directly. The code must define a downsample branch that convolves the input so its dimensions match the output.
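To make this concrete, here is a minimal sketch (my own illustration, not from the original diagram; the tensor sizes are the ones at the start of a stride-2 stage): the main branch changes both the channel count and the spatial size, so the raw input cannot be added to F(x) until a 1x1 convolution projects it to the same shape.

import torch
import torch.nn as nn

x = torch.randn(1, 64, 56, 56)  # input to the first block of a new stage
fx = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)(x)  # main branch F(x): (1, 128, 28, 28)
# fx + x would raise an error: 128 vs 64 channels, 28x28 vs 56x56 spatially
proj = nn.Conv2d(64, 128, kernel_size=1, stride=2)(x)  # the downsample branch: (1, 128, 28, 28)
print(fx.shape == proj.shape)  # True, so fx + proj is now a valid residual sum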
The full code:
import torch
import torch.nn as nn

# The basic residual block: two 3x3 convolutions plus a shortcut connection
class ResidualBlock(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        # downsample marks a dashed (projection) shortcut: in out = F(x) + x,
        # the input x and the output F(x) have different dimensions, so x
        # must first be downsampled to match.
        if self.downsample is not None:
            identity = self.downsample(x)
        # first convolution
        out = self.conv1(x)
        out = self.bn1(out)   # batch normalization
        out = self.relu(out)  # activation
        # second convolution
        out = self.conv2(out)
        out = self.bn2(out)
        # add the shortcut to the block output, then apply ReLU
        out += identity
        out = self.relu(out)
        return out
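# (Added note, not in the original post.) Shape behaviour of the block: a
# solid-line block preserves the input shape, e.g. (1, 64, 56, 56) stays
# (1, 64, 56, 56), while the first (dashed) block of a stride-2 stage with
# its 1x1 downsample maps (1, 64, 56, 56) -> (1, 128, 28, 28), matching
# F(x) so that the residual sum is valid.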
class ResNet34(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet34, self).__init__()
        # stem: convolve the 3-channel RGB input; 224x224 -> 112x112
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # 112x112 -> 56x56
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # the residual stages (3, 4, 6, 3 blocks for ResNet34)
        self.layer1 = self.make_layer(in_channel=64, out_channel=64, num_block=3, stride=1)
        self.layer2 = self.make_layer(in_channel=64, out_channel=128, num_block=4, stride=2)
        self.layer3 = self.make_layer(in_channel=128, out_channel=256, num_block=6, stride=2)
        self.layer4 = self.make_layer(in_channel=256, out_channel=512, num_block=3, stride=2)
        # layer4 outputs a 7x7x512 feature map; average pooling reduces it to 1x1x512
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # linear layer mapping the 512-dim vector to num_classes (1000) scores
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, in_channel, out_channel, num_block, stride=1):
        downsample = None
        # a projection shortcut is needed whenever the block changes the shape
        if stride != 1 or in_channel != out_channel:
            downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channel)
            )
        layers = []
        # the first block of each stage takes downsample, which determines
        # whether the input needs to be transformed
        layers.append(ResidualBlock(in_channel=in_channel, out_channel=out_channel,
                                    downsample=downsample, stride=stride))
        # the remaining blocks of the stage have matching input and output shapes
        for i in range(1, num_block):
            layers.append(ResidualBlock(in_channel=out_channel, out_channel=out_channel, stride=1))
        return nn.Sequential(*layers)
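    # (Added note, not in the original post.) make_layer builds one stage:
    # e.g. make_layer(64, 128, num_block=4, stride=2) returns a Sequential of
    # one dashed block (stride 2, with the 1x1 downsample branch) followed by
    # three plain 128->128 blocks whose input and output shapes match.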
    # the forward pass
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.max_pool(x)     # (N, 64, 56, 56) for a 224x224 input
        x = self.layer1(x)       # (N, 64, 56, 56)
        x = self.layer2(x)       # (N, 128, 28, 28)
        x = self.layer3(x)       # (N, 256, 14, 14)
        x = self.layer4(x)       # (N, 512, 7, 7)
        x = self.avgpool(x)      # (N, 512, 1, 1)
        x = torch.flatten(x, 1)  # (N, 512)
        x = self.fc(x)           # (N, num_classes)
        return x
model = ResNet34()  # instantiate the ResNet34 model
input = torch.randn(1, 3, 224, 224)  # random input data; 3 is the RGB channel count
output = model(input)  # pass the data through the model
print(output.size())  # print the shape of the result
Output:
D:\python3\anaconda\envs\pytorch\python.exe D:/python3/PythonProgram/smartAbulm/resnet34.py
torch.Size([1, 1000])
Process finished with exit code 0
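As an optional sanity check (my addition; it assumes torchvision is installed), the layer layout above mirrors torchvision's reference resnet34, so if the block definitions line up, the two models should report the same total parameter count:

from torchvision.models import resnet34

ref = resnet34()  # torchvision's reference implementation (random weights)
ours = ResNet34()
print(sum(p.numel() for p in ref.parameters()))
print(sum(p.numel() for p in ours.parameters()))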