20180422 qzd
ch03 - 手写数字的数据集MNIST上
- 打开文件并获取其中的内容
(得到的是文本形式)
# Open the MNIST training set (CSV, one record per line) and load every line
# into memory.  Note: it is readlines() -- plural -- that reads the whole file
# into a list of strings; readline() reads only a single line.
# The with-statement guarantees the file is closed even if reading fails.
with open("mnist_dataset/mnist_train_100.csv", 'r') as data_file:
    data_list = data_file.readlines()
- 列表元素
len(data_list)
输出结果:
100
data_list[4]
输出结果:
'5,0,0,……,0,0\n'
解释说明:
‘标签+像素(784--28*28)’
- 列表 --> 数组
(将使用逗号分隔的数字列表转换成合适的数组)
- 在逗号处进行切分
- 忽略第一个标签值
- 绘制数组
#导入python扩展库
import numpy as np
import matplotlib.pyplot
%matplotlib inline
all_values= data_list[0].split(',') #-->['5','0',……,'0\n']
#文件是以文本的形式读取的,每一行或每一条记录依然是文本。由逗号分割每一行得到的仍然是文本片段。
#np.asfarray()将文本字符串转换成实数,并创建这些数字的数组
image_array = np.asfarray(all_values[1:]).reshape((28,28)) #除第一个元素以外的所有值(忽略第一个标签值)--> [[ 0. 0. ……][ 0. 0. ……]……[ …… 0. 0.]]
matplotlib.pyplot.imshow(image_array,cmap='Greys',interpolation='None')
输出结果:
- 准备MNIST训练数据
- 用于训练和查询的输入数据(缩放)
(0 ~ 255 --> 0.01 ~ 1.0, 刻意选择0.01作为范围最低点,是为了避免先前观察到的0值输入最终会人为地造成权重更新失败。没有选择0.99作为输入的上限值,是因为不需要避免输入1.0会造成这个问题。只需要避免输出值为1.0)
# Scale raw pixel values from 0..255 into the range 0.01..1.00.
# The 0.01 offset keeps inputs away from 0, which would otherwise zero out
# the corresponding weight updates during training.
# np.asarray(..., dtype=float) replaces np.asfarray (removed in NumPy 2.0).
scaled_input = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
print(scaled_input)
- 用于训练的输出数据
(试图让神经网络生成0和1的输出,对于激活函数而言是不可能的,这会导致大的权重和饱和网络,因此需要重新调整这些数字。将使用值0.01和0.99来代替0和1)
# Build the target vector for training: every output node gets 0.01 except
# the node matching this record's label, which gets 0.99.  Exact 0 and 1 are
# unreachable for the sigmoid and would drive the weights into saturation.
onodes = 10  # number of output nodes (one per digit class, example value)
targets = np.full(onodes, 0.01)
# all_values[0] holds the label digit for this record
targets[int(all_values[0])] = 0.99
print(targets)
# expected output: [ 0.01 0.01 0.01 0.01 0.01 0.99 0.01 0.01 0.01 0.01]
- 训练网络(完整代码)
#python notebook for Make Your Own Neural Network
#code for a 3-layer neural network, and code for learning the MNIST dataset
import numpy as np
#scipy.special for the sigmoid function expit()
import scipy.special
#library for plotting arrays
import matplotlib.pyplot
#ensure the plots are inside this notebook, not an external window
%matplotlib inline
#neural network class definition
#neural network class definition
class neuralNetwork:
    """A fully connected 3-layer (input/hidden/output) neural network trained
    by stochastic gradient descent with a sigmoid activation function."""

    #initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Record the layer sizes, draw random initial weights and store the
        learning rate and activation function."""
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Weight matrices: wih links input->hidden, who links hidden->output.
        # Entry w_i_j is the weight on the link from node i to node j of the
        # next layer.  Weights are sampled from a normal distribution centred
        # on 0.0 with standard deviation 1/sqrt(number of incoming links).
        self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Learning rate for the gradient-descent weight updates.
        self.lr = learningrate
        # Sigmoid squashing function applied at the hidden and output layers.
        self.activation_function = lambda x: scipy.special.expit(x)

    #train the neural network
    def train(self, inputs_list, targets_list):
        """Perform one gradient-descent step on a single training example."""
        # Work with column vectors throughout.
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        # Forward pass: input -> hidden -> output.
        hidden_outputs = self.activation_function(np.dot(self.wih, inputs))
        final_outputs = self.activation_function(np.dot(self.who, hidden_outputs))
        # Backward pass: the output-layer error is (target - actual); the
        # hidden-layer error is that error split across the connecting weights
        # and recombined at the hidden nodes.
        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)
        # Gradient-descent updates (sigmoid derivative is out * (1 - out)).
        self.who += self.lr * np.dot(output_errors * final_outputs * (1.0 - final_outputs), hidden_outputs.T)
        self.wih += self.lr * np.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs), inputs.T)

    #query the neural network
    def query(self, inputs_list):
        """Forward-propagate inputs_list and return the output-layer signals."""
        # Convert the input list to a column vector, then push the signal
        # through the hidden and output layers.
        signal = np.array(inputs_list, ndmin=2).T
        signal = self.activation_function(np.dot(self.wih, signal))
        signal = self.activation_function(np.dot(self.who, signal))
        return signal
# Network geometry: 28x28 = 784 input pixels, 100 hidden nodes,
# 10 output nodes (one per digit 0-9).
input_nodes = 784
hidden_nodes = 100
output_nodes = 10
# Learning rate for gradient descent.
learning_rate = 0.3
# Create the network instance.
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# Load the MNIST training data (CSV, one record per line); the context
# manager guarantees the file is closed even if reading fails.
with open("mnist_dataset/mnist_train_100.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()
# Train the network: one gradient-descent step per record.
for record in training_data_list:
    # Record format: label, then 784 pixel values, comma separated.
    all_values = record.split(',')
    # Scale pixels from 0..255 into 0.01..1.00 (exact 0 would kill the
    # corresponding weight updates).
    # np.asarray(..., dtype=float) replaces np.asfarray (removed in NumPy 2.0).
    inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
    # Target vector: 0.01 everywhere except 0.99 at the labelled digit.
    targets = np.zeros(output_nodes) + 0.01
    # all_values[0] is the target label for this record.
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
- 测试网络1(一个测试)
# Load the MNIST test data (CSV) into a list of lines; the context manager
# guarantees the file is closed.
with open("mnist_dataset/mnist_test_10.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()
# Take the first test record and split it into label + 784 pixel strings.
all_values = test_data_list[0].split(',')
# The first field is the correct label for this record.
print(all_values[0])
# Render the remaining 784 pixels as a 28x28 greyscale image.
# np.asarray(..., dtype=float) replaces np.asfarray (removed in NumPy 2.0).
image_array = np.asarray(all_values[1:], dtype=float).reshape((28, 28))
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
输出结果:
n.query((numpy.asfarray(all_values[1:])/255.0*0.99)+0.01)
输出结果:array([[ 0.04266137],
[ 0.01119368],
[ 0.01747274],
[ 0.09164112],
[ 0.06795952],
[ 0.04385833],
[ 0.00848533],
[ 0.87066384],
[ 0.03463805],
[ 0.05105387]])
- 测试网络2(多个测试)
#test the neural network
#scorecard for how well the network performs, initially empty
scorecard = []
#go through all the records in the test data set
for record in test_data_list:
#split the record by the ',' commas
all_values = record.split(',')
#correct answer is first value
correct_label = int(all_values[0])
print(correct_label, 'correct_label')
#scale and shift the inputs
inputs = (np.asfarray(all_values[1:])/255.0*0.99)+0.01
#query the network
outputs = n.query(inputs)
#the index of the highest value corresponds to the label
label = np.argmax(outputs)
print(label,"network's answer")
#append correct or incorrect to list
if(label == correct_label):
#network's answer matches correct answer, add 1 to scorecard
scorecard.append(1)
else:
#network's answer doesn't match correct answer, add 0 to scorecard
scorecard.append(0)
pass
pass
print(scorecard)
#calculate the performance score, the fraction of correct answers
scorecard_array = np.asarray(scorecard) #将列表转化成数组
print(scorecard_array)
print("performance = ",scorecard_array.sum()/scorecard_array.size)
输出结果:
7 correct_label
7 network's answer
2 correct_label
0 network's answer
1 correct_label
1 network's answer
0 correct_label
0 network's answer
4 correct_label
4 network's answer
1 correct_label
1 network's answer
4 correct_label
4 network's answer
9 correct_label
4 network's answer
5 correct_label
4 network's answer
9 correct_label
7 network's answer
[1, 0, 1, 1, 1, 1, 1, 0, 0, 0]
[1 0 1 1 1 1 1 0 0 0]
performance = 0.6