Linear Regression
import torch
from torch import nn
import numpy as np

torch.manual_seed(1)
torch.set_default_tensor_type('torch.FloatTensor')

# generate a synthetic linear-regression dataset: y = 2*x1 - 3.4*x2 + 4.2 + noise
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)  # add Gaussian noise
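As a quick sanity check (an addition, not part of the original listing), we can peek at the first synthetic example; the feature vector should have two entries and the label should be a scalar close to true_w · x + true_b:

# Illustrative: inspect the first synthetic example
print(features[0], labels[0])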
import torch.utils.data as Data

batch_size = 10
# combine the features and labels of the dataset
dataset = Data.TensorDataset(features, labels)
# load the dataset into a DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # mini-batch size
    shuffle=True,           # whether to shuffle the data
    num_workers=2,          # number of worker processes for loading data
)
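To confirm the DataLoader yields mini-batches of the expected shape, a small illustrative check (not in the original) reads one batch and stops:

# Illustrative: read a single mini-batch, then stop
for X, y in data_iter:
    print(X.shape, y.shape)  # expected: torch.Size([10, 2]) torch.Size([10])
    break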
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()  # call the parent constructor
        self.linear = nn.Linear(n_feature, 1)  # prototype: `torch.nn.Linear(in_features, out_features, bias=True)`

    def forward(self, x):
        y = self.linear(x)
        return y
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # other layers can be added here
)
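Printing the container is an optional way (added here) to see the registered layers:

print(net)  # shows the Linear(in_features=2, out_features=1, bias=True) layer inside Sequential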
from torch.nn import init

init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # or modify it directly with `net[0].bias.data.fill_(0)`
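Optionally (not in the original), the freshly initialized parameters can be inspected by name:

# Illustrative: list the initialized parameters
for name, param in net.named_parameters():
    print(name, param.data)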
loss = nn.MSELoss()  # nn built-in squared (MSE) loss
# function prototype: `torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')`

import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.03)  # built-in stochastic gradient descent
print(optimizer)  # function prototype: `torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)`
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))  # reshape y to (batch, 1) to match the output
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
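After training, a reasonable check (an addition, not in the original) is to compare the learned parameters against the ground truth used to synthesize the data; they should agree closely:

dense = net[0]
print(true_w, dense.weight.data)  # learned weights should be near [2, -3.4]
print(true_b, dense.bias.data)    # learned bias should be near 4.2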
Softmax Regression
Load the required packages and modules
import torch
from torch import nn
from torch.nn import init
import torchvision
import torchvision.transforms as transforms
import numpy as np
import sys
def sgd(params, lr, batch_size):
    # To stay consistent with the original book, we divide by batch_size here. Strictly
    # speaking this is unnecessary, because PyTorch's built-in losses already average
    # over the batch dimension by default.
    for param in params:
        param.data -= lr * param.grad / batch_size  # note: update via param.data to bypass autograd tracking
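A minimal usage sketch of sgd with a toy parameter (illustrative, not from the original): one backward pass followed by one update step.

# Toy example: dL/dw = 2, so one step moves w from 1.0 to 1.0 - 0.1 * 2 = 0.8
w = torch.tensor([1.0], requires_grad=True)
(w * 2).sum().backward()
sgd([w], lr=0.1, batch_size=1)
print(w.data)  # tensor([0.8000])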
def load_data_fashion_mnist(batch_size, root='~/Datasets/FashionMNIST'):
    """Download the Fashion-MNIST dataset and load it into memory."""
    transform = transforms.ToTensor()
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes are used to load the data
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter
train_iter, test_iter = load_data_fashion_mnist(128)
for x, y in train_iter:
    print(x.shape)
    print(y.shape)
    break  # inspect the shape of one batch only
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)

num_inputs = 784
num_outputs = 10
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used by the concise softmax-regression implementation
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
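train_ch3 supports two update paths: pass params and lr to use the manual sgd above, or pass a PyTorch optimizer. A usage sketch (W and b below are hypothetical parameter tensors, not defined in this section):

# Illustrative call patterns only:
# train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=[W, b], lr=0.1)  # manual sgd
# train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)  # built-in optimizer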
class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):  # x shape: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))  # flatten each image to a 784-vector
        return y
# net = LinearNet(num_inputs, num_outputs)
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
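Both LinearNet.forward and FlattenLayer rely on x.view(x.shape[0], -1) to flatten each 1×28×28 image into a 784-dimensional vector; a small illustration (an addition, not from the original):

# Illustrative: FlattenLayer maps (batch, 1, 28, 28) to (batch, 784)
print(FlattenLayer()(torch.ones(2, 1, 28, 28)).shape)  # torch.Size([2, 784])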
from collections import OrderedDict

net = nn.Sequential(
    # FlattenLayer(),
    # LinearNet(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))])  # our own LinearNet(num_inputs, num_outputs) would work here too
)
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
loss = nn.CrossEntropyLoss()  # function prototype below
# class torch.nn.CrossEntropyLoss(weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')

optimizer = torch.optim.SGD(net.parameters(), lr=0.1)  # function prototype below
# class torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)
num_epochs = 5
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
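To see a few predictions, the class indices can be mapped to the standard Fashion-MNIST label names; the text_labels list below follows the dataset's documented classes but is an addition, not part of the original:

# Illustrative prediction sketch; `text_labels` assumed from the Fashion-MNIST docs
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
X, y = next(iter(test_iter))
preds = net(X).argmax(dim=1)
for true, pred in zip(y[:5], preds[:5]):
    print('true: %s, pred: %s' % (text_labels[true], text_labels[pred]))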
Multilayer Perceptron
# The imports and helper functions used here (sgd, FlattenLayer, load_data_fashion_mnist,
# evaluate_accuracy, train_ch3) are identical to those defined in the Softmax Regression
# section above and are reused as-is.
num_inputs, num_outputs, num_hiddens = 784, 10, 256
# MLP with one hidden layer of 256 units and a ReLU activation
net = nn.Sequential(
    FlattenLayer(),
    nn.Linear(num_inputs, num_hiddens),
    nn.ReLU(),
    nn.Linear(num_hiddens, num_outputs),
)
for params in net.parameters():
    init.normal_(params, mean=0, std=0.01)

batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
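As an optional final step (not in the original), the trained MLP's test accuracy can be re-checked directly:

# Optional: re-evaluate test accuracy after training
print('final test acc: %.3f' % evaluate_accuracy(test_iter, net))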