import time
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.__version__)
print(device)
1.11.0+cu113
cuda
Dense Block
def conv_block(in_channels, out_channels):
    blk = nn.Sequential(nn.BatchNorm2d(in_channels),
                        nn.ReLU(),
                        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
    return blk
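As a quick sanity check (a minimal sketch, not part of the original notebook), the 3x3 convolution with padding=1 keeps the spatial size unchanged, so a conv_block only changes the channel count:
# conv_block maps (N, in_channels, H, W) -> (N, out_channels, H, W)
tmp_blk = conv_block(3, 10)
print(tmp_blk(torch.rand(4, 3, 8, 8)).shape)  # expected: torch.Size([4, 10, 8, 8])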
class DenseBlock(nn.Module):
    def __init__(self, num_convs, in_channels, out_channels):
        super(DenseBlock, self).__init__()
        net = []
        for i in range(num_convs):
            in_c = in_channels + i * out_channels
            net.append(conv_block(in_c, out_channels))
        self.net = nn.ModuleList(net)
        self.out_channels = in_channels + num_convs * out_channels  # compute the number of output channels

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            X = torch.cat((X, Y), dim=1)  # concatenate input and output along the channel dimension
        return X
blk = DenseBlock(2, 3, 10)
X = torch.rand(4, 3, 8, 8)
Y = blk(X)
Y.shape
torch.Size([4, 23, 8, 8])
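As a quick check (a minimal sketch using only the blk and Y defined above), the 23 output channels follow from in_channels + num_convs * out_channels = 3 + 2 * 10:
# Channel arithmetic of a dense block: each conv_block adds out_channels feature maps,
# so the block outputs in_channels + num_convs * out_channels channels in total.
assert Y.shape[1] == blk.out_channels == 3 + 2 * 10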
Transition Layer
def transition_block(in_channels, out_channels):
    blk = nn.Sequential(
        nn.BatchNorm2d(in_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2))
    return blk
blk = transition_block(23, 10)
blk(Y).shape
torch.Size([4, 10, 4, 4])
The DenseNet Model
net = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
num_channels, growth_rate = 64, 32  # num_channels is the current number of channels
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    DB = DenseBlock(num_convs, num_channels, growth_rate)
    net.add_module("DenseBlock_%d" % i, DB)
    # the number of output channels of the previous dense block
    num_channels = DB.out_channels
    # between dense blocks, insert a transition layer that halves the number of channels
    if i != len(num_convs_in_dense_blocks) - 1:
        net.add_module("transition_block_%d" % i, transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2
net.add_module("BN", nn.BatchNorm2d(num_channels))
net.add_module("relu", nn.ReLU())
net.add_module("global_avg_pool", d2l.GlobalAvgPool2d()) # GlobalAvgPool2d的输出: (Batch, num_channels, 1, 1)
net.add_module("fc", nn.Sequential(d2l.FlattenLayer(), nn.Linear(num_channels, 10)))
X = torch.rand((1, 1, 96, 96))
for name, layer in net.named_children():
    X = layer(X)
    print(name, ' output shape:\t', X.shape)
0  output shape: torch.Size([1, 64, 48, 48])
1  output shape: torch.Size([1, 64, 48, 48])
2  output shape: torch.Size([1, 64, 48, 48])
3  output shape: torch.Size([1, 64, 24, 24])
DenseBlock_0  output shape: torch.Size([1, 192, 24, 24])
transition_block_0  output shape: torch.Size([1, 96, 12, 12])
DenseBlock_1  output shape: torch.Size([1, 224, 12, 12])
transition_block_1  output shape: torch.Size([1, 112, 6, 6])
DenseBlock_2  output shape: torch.Size([1, 240, 6, 6])
transition_block_2  output shape: torch.Size([1, 120, 3, 3])
DenseBlock_3  output shape: torch.Size([1, 248, 3, 3])
BN  output shape: torch.Size([1, 248, 3, 3])
relu  output shape: torch.Size([1, 248, 3, 3])
global_avg_pool  output shape: torch.Size([1, 248, 1, 1])
fc  output shape: torch.Size([1, 10])
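The final 248 channels can be reproduced by tracing the channel count through the blocks (a minimal sketch using only the hyperparameters defined above):
# Each dense block adds num_convs * growth_rate channels;
# each transition layer (after all but the last block) halves the count.
channels = 64
for i, num_convs in enumerate([4, 4, 4, 4]):
    channels += num_convs * 32
    if i != 3:
        channels //= 2
print(channels)  # 248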
Load the Data and Train the Model
batch_size = 256
# If an "out of memory" error occurs, reduce batch_size or the resize value
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
training on cuda
epoch 1, loss 0.0020, train acc 0.834, test acc 0.749, time 27.7 sec
epoch 2, loss 0.0011, train acc 0.900, test acc 0.824, time 25.5 sec
epoch 3, loss 0.0009, train acc 0.913, test acc 0.839, time 23.8 sec
epoch 4, loss 0.0008, train acc 0.921, test acc 0.889, time 24.9 sec
epoch 5, loss 0.0008, train acc 0.929, test acc 0.884, time 24.3 sec
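After training, the network can be applied to a batch from test_iter and its weights saved; a minimal sketch (the file name densenet_fashion_mnist.pt is illustrative, not from the original code):
# Run the trained network on one test batch and save its parameters.
net.eval()  # switch BatchNorm layers to evaluation mode
with torch.no_grad():
    X, y = next(iter(test_iter))
    preds = net(X.to(device)).argmax(dim=1)
    print((preds.cpu() == y).float().mean().item())  # accuracy on this batch
torch.save(net.state_dict(), 'densenet_fashion_mnist.pt')  # hypothetical file name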