train_ch3

train_ch3 is the general training function from chapter 3 of 《动手学深度学习》 (Dive into Deep Learning, MXNet edition). It makes num_epochs passes over the training set, updates the parameters either with the hand-written mini-batch SGD from earlier sections or with a Gluon Trainer, and prints the average loss, training accuracy, and test accuracy after every epoch.

from mxnet import autograd
import d2lzh as d2l  # the book's utility package; provides sgd

num_epochs, lr = 5, 0.1

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for x, y in train_iter:
            with autograd.record():       # record the forward pass for autodiff
                y_hat = net(x)
                l = loss(y_hat, y).sum()  # total loss over the mini-batch
            l.backward()                  # back-propagate to get gradients
            if trainer is None:
                d2l.sgd(params, lr, batch_size)  # hand-written mini-batch SGD
            else:
                trainer.step(batch_size)  # used in the concise softmax implementation section
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        # evaluate_accuracy is defined in the previous section
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
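
The call below relies on objects built in the previous sections: the data iterators, the model net, the loss cross_entropy, the metric evaluate_accuracy, and the parameters w and b. Here is a minimal sketch of those pieces, assuming the Fashion-MNIST setup of the from-scratch softmax-regression section (the shapes and initialization values below are that section's, restated here for completeness):

from mxnet import nd
import d2lzh as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs, num_outputs = 784, 10  # 28*28 pixels in, 10 classes out
w = nd.random.normal(scale=0.01, shape=(num_inputs, num_outputs))
b = nd.zeros(num_outputs)
w.attach_grad()  # allocate gradient buffers for SGD
b.attach_grad()

def softmax(x):
    x_exp = x.exp()
    return x_exp / x_exp.sum(axis=1, keepdims=True)  # row-wise normalization

def net(x):
    return softmax(nd.dot(x.reshape((-1, num_inputs)), w) + b)

def cross_entropy(y_hat, y):
    # negative log of the probability assigned to the true class
    return -nd.pick(y_hat, y).log()

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for x, y in data_iter:
        y = y.astype('float32')
        acc_sum += (net(x).argmax(axis=1) == y).sum().asscalar()
        n += y.size
    return acc_sum / n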


train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size,
          [w, b], lr)
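
The trainer branch is what the concise-implementation section exercises: there the model and optimizer come from Gluon, and params and lr are left as None. A sketch of that usage, following the chapter's concise softmax implementation:

from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn

net = nn.Sequential()
net.add(nn.Dense(10))                   # one dense layer, 10 output classes
net.initialize(init.Normal(sigma=0.01))

loss = gloss.SoftmaxCrossEntropyLoss()  # numerically stable softmax + cross-entropy
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

# params and lr stay None, so train_ch3 takes the trainer.step branch
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
          None, None, trainer)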
