Linear regression with batch gradient descent
import numpy as np

def Gradient_Descent(X, Y, theta=None, alpha=0.01, iterations=200):
    """Batch gradient descent: every update uses all m training samples."""
    m, n = X.shape
    if theta is None:
        # Small random initialization
        theta = 0.1 * np.random.randn(n)
    for i in range(iterations):
        Y_predict = X.dot(theta)              # predictions for the whole training set
        loss = Y - Y_predict                  # residuals, shape (m,)
        loss_grad = X.T.dot(loss) / m         # gradient averaged over all samples
        theta = theta + alpha * loss_grad     # gradient step on the squared-error loss
    return theta

def Prediction(X, theta):
    return X.dot(theta)
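A minimal usage sketch for the batch version; the synthetic data, the appended bias column of ones, and the chosen alpha and iteration count below are illustrative assumptions rather than part of the original code:

import numpy as np

# Hypothetical data: y = 2*x + 1 plus a little noise (not from the original post)
np.random.seed(0)
x = np.random.rand(100, 1)
Y = 2 * x[:, 0] + 1 + 0.05 * np.random.randn(100)
X = np.hstack([x, np.ones((100, 1))])   # append a bias column so theta = [slope, intercept]

theta = Gradient_Descent(X, Y, alpha=0.1, iterations=2000)
print(theta)                  # expected to be close to [2, 1]
print(Prediction(X[:5], theta))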
Linear regression with stochastic gradient descent
import numpy as np

def Gradient_Descent(X, Y, theta=None, alpha=0.01, iterations=200):
    """Stochastic gradient descent: every update uses one randomly drawn sample."""
    m, n = X.shape
    if theta is None:
        # Small random initialization
        theta = 0.1 * np.random.randn(n)
    for i in range(iterations):
        rand = np.random.randint(m)           # index of one random training sample
        x = X[rand]
        y = Y[rand]
        y_predict = x.dot(theta)
        loss = y - y_predict                  # scalar residual for that sample
        loss_grad = loss * x                  # single-sample gradient estimate
        theta = theta + alpha * loss_grad
    return theta

def Prediction(X, theta):
    return X.dot(theta)
Linear regression with mini-batch gradient descent
import numpy as np

def Gradient_Descent(X, Y, batch_size, theta=None, alpha=0.01, iterations=200):
    """Mini-batch gradient descent: every update uses batch_size samples."""
    m, n = X.shape
    if theta is None:
        # Small random initialization
        theta = 0.1 * np.random.randn(n)
    if m % batch_size != 0:
        # Pad X and Y with their first rows so the sample count is a multiple of batch_size
        pad = batch_size - m % batch_size
        X = np.concatenate((X, X[:pad]), axis=0)
        Y = np.concatenate((Y, Y[:pad]), axis=0)
    batch_num = X.shape[0] // batch_size      # integer number of mini-batches
    for i in range(iterations):
        for j in range(batch_num):
            mini_X = X[j * batch_size:(j + 1) * batch_size]
            mini_Y = Y[j * batch_size:(j + 1) * batch_size]
            Y_predict = mini_X.dot(theta)
            loss = mini_Y - Y_predict                    # residuals for this mini-batch
            loss_grad = mini_X.T.dot(loss) / batch_size  # gradient averaged over the mini-batch
            theta = theta + alpha * loss_grad
    return theta

def Prediction(X, theta):
    return X.dot(theta)
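A minimal usage sketch for the mini-batch version; the synthetic data, the bias column, and batch_size = 16 are illustrative assumptions rather than part of the original code:

import numpy as np

# Hypothetical data: y = 2*x + 1 plus a little noise (not from the original post)
np.random.seed(0)
x = np.random.rand(100, 1)
Y = 2 * x[:, 0] + 1 + 0.05 * np.random.randn(100)
X = np.hstack([x, np.ones((100, 1))])   # append a bias column so theta = [slope, intercept]

# 100 is not a multiple of 16, so the function pads X and Y internally
theta = Gradient_Descent(X, Y, batch_size=16, alpha=0.05, iterations=500)
print(theta)                  # expected to be close to [2, 1]
print(Prediction(X[:5], theta))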