一个完整的神经网络训练总体流程:
1.定义神经网络
2.输入数据进行迭代
3.损失函数计算损失
4.梯度反向传播
5.更新网络权重参数
定义神经网络
import torch
import torch.nn.functional as F
# First way to define a network: subclass torch.nn.Module
class Net(torch.nn.Module):
    """A simple two-layer fully connected classifier.

    Args:
        n_feature: number of input features.
        n_hidden: number of hidden units.
        n_output: number of output classes (raw logits, no softmax).
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.out = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Hidden layer with ReLU activation, then a linear output layer.
        # Returns raw logits (CrossEntropyLoss applies softmax internally).
        x = F.relu(self.hidden(x))
        x = self.out(x)
        return x
net1 = Net(n_feature=2, n_hidden=10, n_output=2)
# bug fix: the original printed the undefined name `net` (NameError);
# the instance created above is called `net1`.
print(net1)
# Second way to define the same network: a torch.nn.Sequential container.
# Layers are built individually and then chained in order.
hidden_layer = torch.nn.Linear(2, 10)
activation = torch.nn.ReLU()
output_layer = torch.nn.Linear(10, 2)
net2 = torch.nn.Sequential(hidden_layer, activation, output_layer)
print(net2)
输入数据进行迭代
# Build a toy 2-class dataset: two Gaussian clusters centered at
# (2, 2) for class 0 and (-2, -2) for class 1, 100 points each.
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)      # class-0 points
y0 = torch.zeros(100)                 # class-0 labels
x1 = torch.normal(-2 * n_data, 1)     # class-1 points
y1 = torch.ones(100)                  # class-1 labels
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)  # CrossEntropyLoss needs long labels
# bug fix: the original wrapped x and y in `Variable`, which was never
# imported (NameError) and has been deprecated since PyTorch 0.4 —
# plain tensors track gradients directly, so the wrapper is removed.
# bug fix: forward pass used the undefined name `net`; use `net1`.
out = net1(x)
损失函数计算损失
损失函数包括L1损失函数、MSE损失函数、交叉熵损失函数等
# Loss: cross-entropy for classification (expects raw logits + int labels).
loss_func = torch.nn.CrossEntropyLoss()
# bug fix: the original did `loss = torch.nn.CrossEntropyLoss()` and then
# `loss(out, y)` with the result discarded, so `loss.backward()` below was
# called on the loss *module*, not on the computed loss tensor.
loss = loss_func(out, y)

# Gradient descent / backpropagation.
# bug fix: SGD lives in `torch.optim`, not `torch.nn.optim` (AttributeError),
# and the original passed parameters of the undefined name `net` — use `net1`.
optim = torch.optim.SGD(net1.parameters(), lr=0.02)
optim.zero_grad()   # clear gradients accumulated from the previous step
loss.backward()     # backprop: compute d(loss)/d(parameters)
# update the network weights
optim.step()
参考链接:https://github.com/MorvanZhou/PyTorch-Tutorial/tree/master/tutorial-contents