# 一维线性回归
import numpy as np
import pylab as plt
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
# Training data: 15 one-dimensional samples for linear regression.
_x_values = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
             2.167, 7.042, 10.791, 5.313, 7.997, 3.1]
_y_values = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53,
             1.221, 2.827, 3.465, 1.65, 2.904, 1.3]
# Convert the numpy arrays to Tensors of shape (15, 1).
x_train = torch.from_numpy(np.array(_x_values, dtype=np.float32).reshape(-1, 1))
y_train = torch.from_numpy(np.array(_y_values, dtype=np.float32).reshape(-1, 1))
# Wrap the data in Variable, moving it to the GPU when one is available.
if torch.cuda.is_available():
    x_train = Variable(x_train).cuda()
    y_train = Variable(y_train).cuda()
else:
    x_train = Variable(x_train)
    y_train = Variable(y_train)
#建立模型
class LinearRegression(nn.Module):
    """Simple linear model: a single 1-in/1-out affine layer (y = w*x + b)."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        # One input feature mapped to one output value.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Apply the affine transform directly; no activation is needed.
        return self.linear(x)
# Instantiate the model; call .cuda() to place it on the GPU when available.
model = LinearRegression()
if torch.cuda.is_available():
    model = model.cuda()
# Define the loss function and the optimizer.
criterion = nn.MSELoss()                            # mean-squared-error loss
optimizer = optim.SGD(model.parameters(), lr=1e-3)  # plain SGD
# Train the linear-regression model with full-batch gradient descent.
num_epochs = 1000
for epoch in range(num_epochs):
    # forward pass
    out = model(x_train)
    loss = criterion(out, y_train)
    # backward pass
    optimizer.zero_grad()   # clear gradients accumulated from the last step
    loss.backward()
    optimizer.step()        # update the parameters
# Predict on the training inputs for visualization.
model.eval()                # switch the model to evaluation mode
with torch.no_grad():       # gradients are not needed for plotting
    predict = model(x_train)
# BUGFIX: .numpy() raises on CUDA tensors, so move data to the CPU first;
# .cpu() is a no-op when the tensor already lives on the CPU.
plt.plot(x_train.data.cpu().numpy(), y_train.data.cpu().numpy(), 'ro', label='Original data')
plt.plot(x_train.data.cpu().numpy(), predict.data.cpu().numpy(), label='Fitting Line')
plt.legend()
plt.show()
# Polynomial regression: the ground-truth function is
#   y = 0.5*x + 3*x^2 + 2.4*x^3 + 0.9
W_target = torch.Tensor([0.5, 3, 2.4]).unsqueeze(1)  # reshape from (3,) to (3, 1)
b_target = torch.Tensor([0.9])


def f(x):
    """Evaluate the target polynomial on an (N, 3) feature matrix.

    `x` holds the columns [x, x^2, x^3]; the matrix product with
    W_target plus the bias yields the true y values, shape (N, 1).
    """
    return x.mm(W_target) + b_target[0]
# Preprocess the data: turn each scalar input into a row of polynomial powers.
def make_feature(x):
    """Build the feature matrix with columns [x, x^2, x^3].

    A 1-D tensor of N scalars becomes an (N, 3) matrix, one row per sample.
    """
    col = x.unsqueeze(1)  # (N,) -> (N, 1) so the power columns can be stacked
    powers = [col ** degree for degree in (1, 2, 3)]
    return torch.cat(powers, 1)  # concatenate along the column dimension
def get_batch(batch_size=32):
    """Sample one random (x, f(x)) batch for the polynomial fit."""
    # Draw standard-normal inputs and sort them so later plots come out ordered.
    samples = torch.randn(batch_size)
    x = make_feature(samples.sort()[0])
    y = f(x)
    # Wrap in Variable, on the GPU when one is available.
    if torch.cuda.is_available():
        return Variable(x).cuda(), Variable(y).cuda()
    return Variable(x), Variable(y)
# Define the polynomial model: a single linear layer over the power features.
class poly_model(nn.Module):
    """Linear map from the [x, x^2, x^3] features to a scalar prediction."""

    def __init__(self):
        super(poly_model, self).__init__()
        self.poly = nn.Linear(3, 1)  # three features in, one value out

    def forward(self, x):
        return self.poly(x)
# Build the polynomial model, on the GPU when one is available.
model = poly_model().cuda() if torch.cuda.is_available() else poly_model()
# Define the loss function and the optimizer.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=1e-3)
# Train on fresh random batches until the loss drops below the threshold.
epoch = 0
while True:
    # get data
    batch_x, batch_y = get_batch()
    # forward pass
    output = model(batch_x)
    loss = criterion(output, batch_y)
    # BUGFIX: `loss.data[0]` raises an IndexError on PyTorch >= 0.5 because
    # the loss is a 0-dimensional tensor; .item() is the supported way to
    # read a scalar tensor's Python value.
    print_loss = loss.item()
    # reset gradients accumulated from the previous step
    optimizer.zero_grad()
    # backward pass
    loss.backward()
    # update parameters
    optimizer.step()
    epoch += 1
    if print_loss < 1e-3:
        break
# Evaluate on one large sorted batch and plot the fitted curve.
batch_x, batch_y = get_batch(10000)
model.eval()                # switch the model to evaluation mode
with torch.no_grad():       # gradients are not needed for plotting
    predict = model(batch_x)
# BUGFIX: .numpy() raises on CUDA tensors — move data to the CPU first
# (.cpu() is a no-op for CPU tensors). Column 0 of batch_x holds the raw
# x values, since the feature columns are [x, x^2, x^3].
plt.plot(batch_x.data.cpu().numpy()[:, 0], batch_y.data.cpu().numpy(), 'ro', label='Real Curve')
plt.plot(batch_x.data.cpu().numpy()[:, 0], predict.data.cpu().numpy(), label='Fitting Curve')
plt.legend()
plt.show()