Randomly create some training data. Note the shapes: x is [N, D_in] and y is [N, D_out]
import torch
import torch.nn as nn
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
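A quick sanity check of the shapes (illustration only, not part of the original code):
print(x.shape, y.shape)  # torch.Size([64, 1000]) torch.Size([64, 10])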
Define a two-layer fully connected network (Linear -> ReLU -> Linear)
class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        # In PyTorch, __init__ declares which layers (sub-modules) the model needs
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H, bias=False)
        self.linear2 = torch.nn.Linear(H, D_out, bias=False)

    def forward(self, x):
        # clamp(min=0) acts as a ReLU between the two linear layers
        y_pred = self.linear2(self.linear1(x).clamp(min=0))
        return y_pred
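For reference, the same architecture can be written without a custom Module. A minimal sketch using torch.nn.Sequential, where nn.ReLU() plays the role of clamp(min=0); seq_model is a hypothetical name, not part of the original code:
seq_model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H, bias=False),   # same first layer as self.linear1
    torch.nn.ReLU(),                         # equivalent to clamp(min=0)
    torch.nn.Linear(H, D_out, bias=False),   # same second layer as self.linear2
)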
Instantiate the model and set up the loss function and optimizer for the whole network
model = TwoLayerNet(D_in, H, D_out)
loss_fn = nn.MSELoss(reduction='sum')
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
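With reduction='sum', MSELoss is simply the sum of squared errors over the whole batch. A small sketch to verify this before training; pred_check and manual are hypothetical names used only for illustration:
with torch.no_grad():                        # no gradients needed for this check
    pred_check = model(x)
    manual = ((pred_check - y) ** 2).sum()   # sum of squared errors, computed by hand
    assert torch.allclose(manual, loss_fn(pred_check, y))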
Training loop
for it in range(500):
    # Forward pass
    y_pred = model(x)

    # Compute the loss
    loss = loss_fn(y_pred, y)
    print(it, loss.item())

    # Clear the gradients accumulated from the previous iteration
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update the model parameters
    optimizer.step()
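Once the loop finishes, the trained model can be used for inference. A minimal sketch, assuming a hypothetical test batch x_new of the same shape as x:
model.eval()                       # no dropout/batchnorm here, but eval() is good practice
with torch.no_grad():              # disable autograd for inference
    x_new = torch.randn(N, D_in)   # hypothetical test inputs
    print(model(x_new).shape)      # torch.Size([64, 10])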