Linear fitting (linear regression) demo code

# -*- coding:utf-8 -*-

import numpy as np

def MeanSquareError(w, b, points):
    """Return the mean squared error of the line y = w*x + b over *points*.

    Args:
        w: Slope of the candidate line.
        b: Intercept of the candidate line.
        points: Sequence of [x, y] pairs.

    Returns:
        float: Average of (y - (w*x + b))**2 over all points.

    Raises:
        ValueError: If *points* is empty (the mean would be undefined;
            the original code raised ZeroDivisionError here).
    """
    if len(points) == 0:
        raise ValueError("points must be non-empty")
    total_error = sum((y - (w * x + b)) ** 2 for x, y in points)
    return total_error / float(len(points))

def StepGradient(w, b, points, learning_rate):
    """Apply a single gradient-descent step for the line y = w*x + b.

    Computes the gradient of the mean squared error with respect to
    b and w over all points, then moves both parameters one step of
    size *learning_rate* against the gradient.

    Returns:
        tuple: The updated (w, b) pair.
    """
    n = float(len(points))
    # d(MSE)/db = (2/N) * sum(pred - y); d(MSE)/dw has an extra factor of x.
    grad_b = sum((2 / n) * ((w * x + b) - y) for x, y in points)
    grad_w = sum((2 / n) * x * ((w * x + b) - y) for x, y in points)
    return w - learning_rate * grad_w, b - learning_rate * grad_b

def GradientDescent(points, w_init, b_init, learning_rate, iter_num):
    """Fit y = w*x + b to *points* by running iter_num gradient steps.

    Starts from (w_init, b_init), repeatedly calls StepGradient, and
    logs the current loss every 50 iterations.

    Returns:
        tuple: The final (w, b) pair.
    """
    w, b = w_init, b_init
    for step in range(iter_num):
        w, b = StepGradient(w, b, points, learning_rate)
        loss = MeanSquareError(w, b, points)
        # Periodic progress report (every 50th step, including step 0).
        if not step % 50:
            print(f"iteration:{step}, loss:{loss}, w:{w}, b:{b}")
    return w, b

def generate_samples():
    points = []
    for i in range(100):
        x = np.random.uniform(-10., 10.)
        eps = np.random.normal(0., 0.01)
        y = 1.477 * x + 0.089 + eps
        points.append([x, y])
    return points
        

if __name__ == "__main__":
    # Hyper-parameters for the toy linear-regression fit.
    learning_rate = 0.01
    w_init, b_init = 0, 0
    iter_num = 1000

    # Generate synthetic data, fit the line, and report the result.
    points = generate_samples()
    w, b = GradientDescent(points, w_init, b_init, learning_rate, iter_num)
    print(f'Final loss:{MeanSquareError(w, b, points)}, w:{w}, b:{b}')
    
    

最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
【社区内容提示】社区部分内容疑似由AI辅助生成,浏览时请结合常识与多方信息审慎甄别。
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

相关阅读更多精彩内容

友情链接更多精彩内容