# I wrote this code while learning PyTorch, following a textbook example; kept as a record of the learning process.
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
# Build the training set: 15 scalar (x, y) samples for fitting a line.
_xs = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168,
       9.779, 6.182, 7.59, 2.167, 7.042,
       10.791, 5.313, 7.997, 3.1]
_ys = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573,
       3.366, 2.596, 2.53, 1.221, 2.827,
       3.465, 1.65, 2.904, 1.3]
# Column vectors of shape (15, 1), float32 to match nn.Linear's weights.
x_train = np.array(_xs, dtype=np.float32).reshape(-1, 1)
y_train = np.array(_ys, dtype=np.float32).reshape(-1, 1)
# Quick scatter plot of the raw data (kept for reference):
# plt.plot(x_train, y_train, 'ob')
# plt.show()
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
class LinearRegression(nn.Module):
    """Single-feature linear model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature, one output feature.
        self.liner = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to a (batch, 1) input and return (batch, 1)."""
        return self.liner(x)
# Instantiate the model, moving it to the GPU when one is available.
model = LinearRegression()
if torch.cuda.is_available():
    model = model.cuda()
# Mean-squared-error loss, optimized with plain SGD (learning rate 0.001).
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
# Train with full-batch gradient descent.
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 -- plain
# tensors carry autograd state, so the Variable(...) wrappers are removed.
# The device transfer is loop-invariant, so it is hoisted out of the loop
# instead of re-checking torch.cuda.is_available() every epoch.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
inputs = x_train.to(device)
target = y_train.to(device)

num_epochs = 1000
for epoch in range(num_epochs):
    # forward: prediction and MSE loss
    out = model(inputs)
    loss = criterion(out, target)
    # backward: reset gradients, backprop, parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Report progress every 20 epochs.
    if (epoch + 1) % 20 == 0:
        print('Epoch[{}/{}],loss:{:.6f}'
              .format(epoch + 1, num_epochs, loss.item()))
# Final fitted parameters (slope and intercept).
print(model.liner.weight.item(), model.liner.bias.item())
# Evaluate the fitted line on the training inputs and plot it.
model.eval()
with torch.no_grad():
    # BUG FIX: the original fed a CPU tensor straight into a possibly-CUDA
    # model and then called .numpy() on the (possibly-CUDA) result, which
    # raises at runtime when a GPU is available. Route the input through
    # the model's device and bring the prediction back to the CPU.
    device = next(model.parameters()).device
    predict = model(x_train.to(device)).cpu().numpy()
# Plot the raw data points and the fitted regression line.
plt.plot(x_train.numpy(), y_train.numpy(), 'ob')
plt.plot(x_train.numpy(), predict)
plt.show()