1、添加数据统一化:提高准确率
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
import os
import time
import sklearn
import tensorflow as tf
from tensorflow import keras # 从tf中导入keras,即tf.keras 若想纯keras需导入import keras
# modelu模块
for modelu in mpl, np, pd, sklearn, tf, keras:
print(modelu.__name__, modelu.__version__)
-
1.使用tf.keras处理数据,导入数据、了解数据集
# Image-classification dataset (Fashion-MNIST: 28x28 grayscale clothing images, 10 classes)
fashion_mnist = keras.datasets.fashion_mnist
# The data will be split into training, validation and test sets
# load_data() returns the train and test splits
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
# Split the full training set further into training and validation sets
# The first 5000 samples become the validation set, the rest the training set
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
print('x_valid y_valid:', x_valid.shape, y_valid.shape)
print('x_train y_train:', x_train.shape, y_train.shape)
print('x_test y_test:', x_test.shape, y_test.shape)
print(np.max(x_train), np.min(x_train))
255 0
-
添加数据统一化:提高准确率
# x = (x - u) / std
# u = mean, std = standard deviation; after scaling the data has zero mean and unit variance
# Use sklearn to perform the standardization
from sklearn.preprocessing import StandardScaler
# Create the scaler
scaler = StandardScaler()
# Standardize the training set
# x_train is 3-D; reshape to 2-D for the scaler, then reshape back to 3-D afterwards
# ([None, 28, 28] -> [None, 784]) -> [None, 28, 28]
# NOTE(review): the code actually reshapes to (-1, 1), i.e. one column of all pixels,
# so a single global mean/std is used rather than one per pixel position
# The data is originally np.int; convert to np.float32 first
# fit_transform on the training set records the mean/std; transform then reuses those
# statistics for the validation and test sets, keeping all splits consistently scaled
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
print(np.max(x_train_scaled), np.min(x_train_scaled))
2.0231433 -0.8105136
-
2.使用tf.keras构建分类模型
# Build the classifier with tf.keras.models.Sequential()
# 1. Create the Sequential object
model = keras.models.Sequential()
# 2. Add layers: input (flatten), hidden dense layers, output layer
# 2.1 Input: 28x28 images; Flatten unrolls the 2-D matrix into a 1-D vector of 784 values
model.add(keras.layers.Flatten(input_shape=[28, 28]))
# 2.2 Fully-connected layer: 300 units, each connected to every unit of the previous layer
model.add(keras.layers.Dense(300, activation="relu"))
# Another fully-connected layer, 100 units, connected to the previous layer
model.add(keras.layers.Dense(100, activation="relu"))
# 2.3 Output layer: a length-10 vector; softmax turns it into class probabilities
model.add(keras.layers.Dense(10, activation="softmax"))
"""
# Equivalent: pass the layer list directly to the constructor
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
"""
# Activation functions:
# relu: y = max(0, x) — returns 0 for x < 0, x otherwise
# softmax: maps a vector to a probability distribution
#   vector x = [x1, x2, x3]
#   sum = e^x1 + e^x2 + e^x3
#   y = [e^x1/sum, e^x2/sum, e^x3/sum] — each value in (0, 1), summing to 1
# 3. Loss function, optimizer and metrics
# "sparse" because y is an integer class index, not a one-hot vector
model.compile(loss="sparse_categorical_crossentropy",  # cross-entropy loss for classification
              optimizer="adam",                        # Adam: adaptive first-order variant of SGD
              metrics=["accuracy"])                    # report accuracy during training
-
3.使用tf.keras训练分类模型
# Train for 10 epochs on the training set, evaluating on the validation set after each epoch
history = model.fit(x_train_scaled, y_train, epochs=10, validation_data=(x_valid_scaled, y_valid))
Train on 55000 samples, validate on 5000 samples
Epoch 1/10
55000/55000 [==============================] - 4s 80us/sample - loss: 0.4566 - accuracy: 0.8332 - val_loss: 0.3797 - val_accuracy: 0.8598
Epoch 2/10
55000/55000 [==============================] - 4s 74us/sample - loss: 0.3552 - accuracy: 0.8692 - val_loss: 0.3384 - val_accuracy: 0.8760
Epoch 3/10
55000/55000 [==============================] - 4s 76us/sample - loss: 0.3158 - accuracy: 0.8833 - val_loss: 0.3353 - val_accuracy: 0.8818
Epoch 4/10
55000/55000 [==============================] - 4s 76us/sample - loss: 0.2931 - accuracy: 0.8908 - val_loss: 0.3324 - val_accuracy: 0.8794
Epoch 5/10
55000/55000 [==============================] - 4s 77us/sample - loss: 0.2726 - accuracy: 0.8971 - val_loss: 0.3159 - val_accuracy: 0.8892
Epoch 6/10
55000/55000 [==============================] - 4s 75us/sample - loss: 0.2586 - accuracy: 0.9038 - val_loss: 0.3083 - val_accuracy: 0.8880
Epoch 7/10
55000/55000 [==============================] - 5s 83us/sample - loss: 0.2430 - accuracy: 0.9085 - val_loss: 0.3229 - val_accuracy: 0.8876
Epoch 8/10
55000/55000 [==============================] - 4s 81us/sample - loss: 0.2297 - accuracy: 0.9133 - val_loss: 0.3180 - val_accuracy: 0.8888
Epoch 9/10
55000/55000 [==============================] - 5s 82us/sample - loss: 0.2192 - accuracy: 0.9168 - val_loss: 0.3165 - val_accuracy: 0.8952
Epoch 10/10
55000/55000 [==============================] - 4s 82us/sample - loss: 0.2084 - accuracy: 0.9225 - val_loss: 0.3409 - val_accuracy: 0.8898
-
4.指标图示打印
#### 4.指标图示打印
def plot_learning_curves(history):
    """Plot every metric recorded in a Keras ``History`` object on one figure.

    Args:
        history: object with a ``history`` dict mapping metric names
            (e.g. ``loss``, ``val_accuracy``) to per-epoch value lists.
    """
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)            # show grid lines
    plt.gca().set_ylim(0, 1)  # fix the y-axis range to [0, 1]
    plt.show()

plot_learning_curves(history)
image.png
-
5.测试集上评估
# Final evaluation on the held-out test set; returns [loss, accuracy]
model.evaluate(x_test_scaled, y_test)
10000/10000 [==============================] - 0s 38us/sample - loss: 0.3600 - accuracy: 0.8793
[0.3600435515999794, 0.8793]