# -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 18:59:53 2018
@author: Plenari
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sc
import seaborn as sns
x = np.arange(1, 10)
x = np.array([np.ones_like(x), x, x ** 2]).T   # design matrix with columns [1, x, x^2]
y = np.dot(x, np.array([[1, 2, 3]]).T)         # true model: y = 1 + 2*x + 3*x^2
sns.set()
# normal equation: w = (X^T X)^{-1} X^T y
x0 = np.mat(x)
w_normal = (x0.T * x0).I * x0.T * np.mat(y.reshape(-1, 1))
plt.plot(y, '*')  # original data points
plt.plot(x0*w_normal,'-')
plt.show()
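# Cross-check (my addition, a minimal sketch): numpy's least-squares solver
# should agree with the hand-rolled normal equation above; both estimate the
# true coefficients [1, 2, 3].
w_lstsq = np.linalg.lstsq(x, y, rcond=None)[0]
print('lstsq solution:', w_lstsq.flatten())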
# gradient descent
learn = 1e-4  # learning rate
'''
Gradient descent for linear regression
'''
w = np.ones([len(x[0]), 1])  # initial weights
m, n = x.shape
for i in range(100):
    hypo = np.dot(x, w)                 # current predictions
    loss = hypo - y                     # residuals
    gradient = np.dot(x.T, loss) / m    # gradient of the MSE loss
    rmse = np.sqrt(np.mean(loss ** 2))  # RMSE, logged below
    if i % 50 == 0:
        print(i, rmse)
    w = w - learn * gradient
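# Quick comparison (not in the original): after only 100 small steps the
# estimate may not have fully converged, so it can still differ from the
# closed-form solution.
print('gradient descent:', w.flatten())
print('normal equation :', np.asarray(w_normal).flatten())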
'''
sklearn
'''
from sklearn.linear_model import LinearRegression
lr=LinearRegression()
lr.fit(x,y)
print(lr.coef_)
# output: [[0. 2. 3.]]
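# Side note (my addition, a minimal sketch): the leading 0 appears because
# LinearRegression fits its own intercept by default, absorbing the ones
# column. Disabling it recovers the bias as a coefficient instead:
lr_no_icpt = LinearRegression(fit_intercept=False)
lr_no_icpt.fit(x, y)
print(lr_no_icpt.coef_)  # expected: [[1. 2. 3.]]
print(lr.intercept_)     # the absorbed bias of the default fit, expected: [1.]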
# pytorch
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class LR(nn.Module):
    def __init__(self):
        super(LR, self).__init__()
        # one weight per column of [1, x, x^2]; no extra bias term
        self.linear = nn.Linear(3, 1, bias=False)

    def forward(self, x):
        x = self.linear(x)
        return x
'''
Linear regression implemented with PyTorch
'''
model = LR()
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr = 0.0001)
x=torch.from_numpy(x.astype('float32'))
y=torch.from_numpy(y.astype('float32'))
loss0 = 1e9  # previous loss, used in the convergence test below
for i in range(200000):
    optimizer.zero_grad()  # zero the gradients first (same effect as net.zero_grad())
    output = model(x)
    loss = criterion(output, y)
    loss.backward()
    optimizer.step()  # update the parameters
    if np.abs(loss.item() - loss0) < 1e-9:  # stop once the loss change is negligible
        print(loss.item())
        break
    loss0 = loss.item()
print(model.linear.state_dict())
plt.plot(y.data.numpy().flatten(),'*')
plt.plot(model(x).data.numpy().flatten())
plt.title('pytorch')
plt.show()
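# Final sanity check (sketch, not in the original): the learned PyTorch
# weights should land near the closed-form solution and the true [1, 2, 3].
w_torch = model.linear.weight.data.numpy().flatten()
print('pytorch weights:', w_torch)
print('normal equation:', np.asarray(w_normal).flatten())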