PyTorch Deep Learning: Training CIFAR10 with DenseNet

The CIFAR10 dataset is available through torchvision.datasets.CIFAR10().

CIFAR10 is a small dataset for recognizing everyday objects. It contains RGB color images in 10 classes, each 32x32 pixels, as shown:

[Figure: sample CIFAR10 images (CIFAR10.png)]

Compared with MNIST, whose images are 28x28 single-channel grayscale, CIFAR10 images are 32x32 three-channel RGB, which makes them closer to real-world photographs.

1. Building the dataset

Each pixel value lies in the range 0-255. Raw values this large are unfavorable for training and make convergence harder, so ToTensor first scales them to (0, 1) and Normalize then standardizes each channel.

# Dataset preprocessing

# transforms.RandomHorizontalFlip(p=0.5) --- flips the image horizontally with probability 0.5
transform_train = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# transforms.ToTensor() --- reshapes (H,W,C) -> (C,H,W) and maps each pixel from (0-255) to (0-1) by dividing by 255
# transforms.Normalize --- applies (x - mean) / std per channel, standardizing the input (already in (0,1) after ToTensor) around zero
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])

train_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=True, transform=transform_train,
                                 download=True)
test_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=False, transform=transform,
                                download=True)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
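
To see concretely what these transforms do, here is a minimal sanity check (illustrative only; the all-gray dummy image is made up for the example):

# Minimal sanity check of the preprocessing pipeline (illustrative, not part of training)
import numpy as np
from PIL import Image
from torchvision import transforms

# A dummy 32x32 RGB image with every pixel set to 128
img = Image.fromarray(np.full((32, 32, 3), 128, dtype=np.uint8))

t = transforms.Compose([transforms.ToTensor(),
                        transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
x = t(img)
print(x.shape)     # torch.Size([3, 32, 32]) --- (C, H, W) after ToTensor
print(x[0, 0, 0])  # (128/255 - 0.485) / 0.229 ≈ 0.074 for the red channel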



2. Building the DenseNet model

1) The Bottleneck unit --- the building block of a dense block

class Bottleneck(nn.Module):
    def __init__(self, input_channel, growth_rate):
        super(Bottleneck, self).__init__()

        self.bn1 = nn.BatchNorm2d(input_channel)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv1 = nn.Conv2d(input_channel, 4 * growth_rate, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        self.relu2 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1)

    def forward(self, x):
        # Pre-activation ordering: BN -> ReLU -> Conv (1x1 bottleneck, then 3x3)
        out = self.conv1(self.relu1(self.bn1(x)))
        out = self.conv2(self.relu2(self.bn2(out)))
        # Dense connectivity: concatenate the new features with the input along the channel dim
        out = torch.cat([out, x], 1)
        return out
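
A quick shape check (hypothetical numbers, just for illustration; assumes the imports from the full code below): each Bottleneck appends growth_rate new channels to its input, because the input is concatenated onto the output:

block = Bottleneck(input_channel=64, growth_rate=32)
x = torch.randn(1, 64, 8, 8)
print(block(x).shape)  # torch.Size([1, 96, 8, 8]) --- 64 input channels + 32 new ones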

2) The Transition module --- connects two dense blocks

class Transition(nn.Module):
    def __init__(self, input_channels, out_channels):
        super(Transition, self).__init__()

        self.bn = nn.BatchNorm2d(input_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(input_channels, out_channels, kernel_size=1)

    def forward(self, x):
        # 1x1 conv compresses channels, then 2x2 average pooling halves the spatial size
        out = self.conv(self.relu(self.bn(x)))
        out = F.avg_pool2d(out, 2)
        return out
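
Continuing the illustrative example above: a Transition compresses channels with its 1x1 convolution and halves the spatial size with average pooling. With a reduction of 0.5:

trans = Transition(input_channels=96, out_channels=48)
y = torch.randn(1, 96, 8, 8)
print(trans(y).shape)  # torch.Size([1, 48, 4, 4]) --- half the channels, half the resolution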

3. Loss function and optimizer

The loss function is CrossEntropyLoss.
The optimizer is SGD (stochastic gradient descent with momentum).

# Build the loss function and optimizer
criterion = nn.CrossEntropyLoss()
opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.8, weight_decay=0.001)

# Learning-rate schedule --- every step_size epochs: lr = lr * gamma
schedule = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.6, last_epoch=-1)
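
Since schedule.step() is called once per epoch in the training loop below, the learning rate at epoch n is 0.01 * 0.6^(n // 10). A throwaway check (illustrative only; it uses a dummy optimizer so the real one is untouched, and PyTorch may warn that optimizer.step() was never called):

_opt = optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
_sched = optim.lr_scheduler.StepLR(_opt, step_size=10, gamma=0.6)
for ep in range(21):
    if ep in (0, 10, 20):
        print(ep, _sched.get_last_lr())  # [0.01], then [0.006], then [0.0036]
    _sched.step()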

4. Full code

import math
import torch.nn.functional as F
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from matplotlib import pyplot as plt
import time

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# transforms.RandomHorizontalFlip(p=0.5) --- flips the image horizontally with probability 0.5
transform_train = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# transforms.ToTensor() --- reshapes (H,W,C) -> (C,H,W) and maps each pixel from (0-255) to (0-1) by dividing by 255
# transforms.Normalize --- applies (x - mean) / std per channel, standardizing the input (already in (0,1) after ToTensor) around zero
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])

train_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=True, transform=transform_train,
                                 download=True)
test_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=False, transform=transform,
                                download=True)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)


class Bottleneck(nn.Module):
    def __init__(self, input_channel, growth_rate):
        super(Bottleneck, self).__init__()

        self.bn1 = nn.BatchNorm2d(input_channel)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv1 = nn.Conv2d(input_channel, 4 * growth_rate, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        self.relu2 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1)

    def forward(self, x):
        out = self.conv1(self.relu1(self.bn1(x)))
        out = self.conv2(self.relu2(self.bn2(out)))
        out = torch.cat([out, x], 1)
        return out


class Transition(nn.Module):
    def __init__(self, input_channels, out_channels):
        super(Transition, self).__init__()

        self.bn = nn.BatchNorm2d(input_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(input_channels, out_channels, kernel_size=1)

    def forward(self, x):
        out = self.conv(self.relu(self.bn(x)))
        out = F.avg_pool2d(out, 2)
        return out


class DenseNet(nn.Module):
    def __init__(self, nblocks, growth_rate, reduction, num_classes):
        super(DenseNet, self).__init__()

        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate

        self.basic_conv = nn.Sequential(

            nn.Conv2d(3, 2 * growth_rate, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(2 * growth_rate),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        self.dense1 = self._make_dense_layers(num_planes, nblocks[0])
        num_planes += nblocks[0] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(num_planes, nblocks[1])
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(num_planes, nblocks[2])
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.AdaptiveAvgPool2d = nn.AdaptiveAvgPool2d(1)

        # Fully connected classifier head
        self.fc = nn.Sequential(

            nn.Linear(num_planes, 256),
            nn.ReLU(inplace=True),
            # Randomly zero half of the activations to reduce overfitting
            nn.Dropout(0.5),

            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),

            nn.Linear(128, num_classes)
        )

    def _make_dense_layers(self, in_planes, nblock):
        layers = []
        for i in range(nblock):
            layers.append(Bottleneck(in_planes, self.growth_rate))
            in_planes += self.growth_rate

        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.basic_conv(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = self.AdaptiveAvgPool2d(out)

        out = out.view(out.size(0), -1)
        
        out = self.fc(out)
        return out


def DenseNet121():
    return DenseNet([6, 12, 24, 16], growth_rate=32, reduction=0.5, num_classes=10)


def DenseNet169():
    return DenseNet([6, 12, 32, 32], growth_rate=32, reduction=0.5, num_classes=10)


def DenseNet201():
    return DenseNet([6, 12, 48, 32], growth_rate=32, reduction=0.5, num_classes=10)


def DenseNet264():
    return DenseNet([6, 12, 64, 48], growth_rate=32, reduction=0.5, num_classes=10)
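

# Optional shape sanity check (illustrative; uncomment to run). For a 32x32 CIFAR10
# input, the stem downsamples to 8x8 and the three Transitions halve it to 4x4, 2x2,
# and 1x1, leaving num_planes = 1024 features for the classifier head:
# _x = torch.randn(1, 3, 32, 32)
# print(DenseNet121()(_x).shape)  # expected: torch.Size([1, 10])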


# Instantiate the model
model = DenseNet121().to(device)

# Build the loss function and optimizer
criterion = nn.CrossEntropyLoss()
opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.8, weight_decay=0.001)

# Learning-rate schedule --- every step_size epochs: lr = lr * gamma
schedule = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.6, last_epoch=-1)

loss_list = []


# train
def train(epochs):
    start = time.time()
    for epoch in range(epochs):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(train_loader, 0):

            inputs, labels = inputs.to(device), labels.to(device)

            # Forward pass
            outputs = model(inputs)
            # Compute the loss
            loss = criterion(outputs, labels)

            # Reset the gradients
            opt.zero_grad()
            # Backpropagate to compute gradients
            loss.backward()
            # Update the parameters with the computed gradients
            opt.step()

            # Accumulate the loss over the last 100 batches
            running_loss += loss.item()
            loss_list.append(loss.item())

            # Print the average loss every 100 batches
            if (i + 1) % 100 == 0:
                print('epoch = %d , batch = %d , loss = %.6f' % (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0

        # Print the current learning rate at the end of each epoch
        lr_1 = opt.param_groups[0]['lr']
        print("learn_rate:%.15f" % lr_1)
        schedule.step()
        verify()
        # verify() switches the model to eval mode; switch back before the next epoch
        model.train()

    end = time.time()
    # Print the total training time
    print("time:{}".format(end - start))

    # Visualize the training loss (one point per batch)
    plt.plot(loss_list)
    plt.ylabel('loss')
    plt.xlabel('Batch')
    plt.savefig('./DenseNet_train_img.png')
    plt.show()


# Test
def verify():
    model.eval()
    correct = 0.0
    total = 0
    # No gradients are needed for evaluation
    with torch.no_grad():
        print("===========================test===========================")
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)

            pred = outputs.argmax(dim=1)  # index of the largest logit in each row
            total += inputs.size(0)
            correct += torch.eq(pred, labels).sum().item()

    print("Accuracy of the network on the 10000 test images:%.2f %%" % (100 * correct / total))
    print("==========================================================")


if __name__ == '__main__':
    train(100)
    verify()
    # DenseNet reuses features through dense connectivity: each Bottleneck compresses
    # channels with a 1x1 convolution before its 3x3 convolution, which keeps the
    # parameter count low while allowing a very deep network.
    # Train DenseNet on the CIFAR10 dataset.


