Sequence models (RNNs) are very useful for natural language processing and other sequence tasks because they have a form of "memory".
Notation:
Superscript [l]: an object associated with the l-th layer.
Superscript (i): an object associated with the i-th example.
Superscript <t>: an object at the t-th time step.
Subscript i: the i-th entry of a vector.
Import the packages we need:
import numpy as np
from rnn_utils import *
1. Forward propagation for the basic RNN
Basic RNN structure (in this example, Tx = Ty):
Implementation steps
- Implement the computations needed for one time step of the RNN.
- Implement a loop over Tx time steps to process the inputs one at a time.
1.1 Implementing an RNN cell
A recurrent neural network can be seen as the repetition of a single cell, so we first implement the computation for one time step.
Implementation steps
- Compute the hidden state: a<t> = tanh(Waa·a<t-1> + Wax·x<t> + ba)
- Using a<t>, compute the prediction: yhat<t> = softmax(Wya·a<t> + by)
- Store (a<t>, a<t-1>, x<t>, parameters) in cache
- Return a<t>, yhat<t> and cache
We vectorize over m examples, so x<t> has shape (n_x, m) and a<t> has shape (n_a, m).
def rnn_cell_forward(xt, a_prev, parameters):
    """
    Arguments:
    xt -- shape (n_x, m)
    a_prev -- shape (n_a, m)
    parameters -- python dictionary containing:
        Wax -- shape (n_a, n_x)
        Waa -- shape (n_a, n_a)
        Wya -- shape (n_y, n_a)
        ba -- shape (n_a, 1)
        by -- shape (n_y, 1)
    Returns:
    a_next -- shape (n_a, m)
    yt_pred -- shape (n_y, m)
    cache -- tuple of values needed for the backward pass (a_next, a_prev, xt, parameters)
    """
    # Retrieve the parameters from "parameters"
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]
    Wya = parameters["Wya"]
    ba = parameters["ba"]
    by = parameters["by"]

    # Compute the next hidden state and the prediction for this time step
    a_next = np.tanh(np.dot(Wax, xt) + np.dot(Waa, a_prev) + ba)
    yt_pred = softmax(np.dot(Wya, a_next) + by)

    # Store the values needed for backward propagation in cache
    cache = (a_next, a_prev, xt, parameters)

    return a_next, yt_pred, cache
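As a quick sanity check, the cell can be run on random inputs; the sizes n_x = 3, n_a = 5, n_y = 2, m = 10 below are arbitrary illustration values, and the output shapes should match the docstring:

np.random.seed(1)
xt = np.random.randn(3, 10)
a_prev = np.random.randn(5, 10)
parameters = {"Wax": np.random.randn(5, 3), "Waa": np.random.randn(5, 5),
              "Wya": np.random.randn(2, 5), "ba": np.random.randn(5, 1),
              "by": np.random.randn(2, 1)}
a_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)
print(a_next.shape)   # (5, 10)
print(yt_pred.shape)  # (2, 10)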
1.2 RNN forward pass
An RNN can be seen as the repetition of the cell above. If the input sequence spans 10 time steps, the RNN cell is copied 10 times; each cell takes the previous cell's hidden state a<t-1> and the current time step's input x<t>, and outputs this time step's hidden state a<t> and prediction y<t>.
Implementation steps
- Create an array of zeros, a, that will store all the hidden states computed by the RNN.
- Initialize the hidden state: a_next = a0.
- Loop over the time steps with incrementing index t:
  - Update the next hidden state and the cache by running rnn_cell_forward.
  - Store the hidden state in a.
  - Store the prediction in y.
  - Append the cache to caches.
- Return a, y and caches.
def rnn_forward(x, a0, parameters):
    """
    Arguments:
    x -- input data for every time step, shape (n_x, m, T_x)
    a0 -- initial hidden state, shape (n_a, m)
    parameters -- python dictionary containing:
        Waa -- shape (n_a, n_a)
        Wax -- shape (n_a, n_x)
        Wya -- shape (n_y, n_a)
        ba -- shape (n_a, 1)
        by -- shape (n_y, 1)
    Returns:
    a -- shape (n_a, m, T_x)
    y_pred -- shape (n_y, m, T_x)
    caches -- tuple of values needed for the backward pass (list of caches, x)
    """
    # Initialize "caches", which will contain the list of all caches
    caches = []

    # Retrieve dimensions from the shapes of x and parameters["Wya"]
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wya"].shape

    # Initialize "a" and "y_pred" with zeros
    a = np.zeros((n_a, m, T_x))
    y_pred = np.zeros((n_y, m, T_x))

    # Initialize a_next
    a_next = a0

    # Loop over all time steps
    for t in range(T_x):
        a_next, yt_pred, cache = rnn_cell_forward(x[:, :, t], a_next, parameters)
        a[:, :, t] = a_next
        y_pred[:, :, t] = yt_pred
        caches.append(cache)

    # Store the values needed for backward propagation in caches
    caches = (caches, x)

    return a, y_pred, caches
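The same kind of check works for the full forward pass, here over T_x = 4 arbitrary time steps with the same illustrative sizes as above:

np.random.seed(1)
x = np.random.randn(3, 10, 4)   # (n_x, m, T_x)
a0 = np.random.randn(5, 10)
parameters = {"Waa": np.random.randn(5, 5), "Wax": np.random.randn(5, 3),
              "Wya": np.random.randn(2, 5), "ba": np.random.randn(5, 1),
              "by": np.random.randn(2, 1)}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print(a.shape)         # (5, 10, 4)
print(y_pred.shape)    # (2, 10, 4)
print(len(caches[0]))  # 4 -- one cache per time step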
2. Long Short-Term Memory (LSTM)
Basic LSTM structure:
About the gates
- Forget gate: Γf<t> = σ(Wf·[a<t-1>, x<t>] + bf)
- Update gate: Γu<t> = σ(Wi·[a<t-1>, x<t>] + bi)
- Updating the cell: the candidate value is c̃<t> = tanh(Wc·[a<t-1>, x<t>] + bc), and the new cell state is c<t> = Γf<t> * c<t-1> + Γu<t> * c̃<t>
- Output gate: Γo<t> = σ(Wo·[a<t-1>, x<t>] + bo), and the new hidden state is a<t> = Γo<t> * tanh(c<t>)
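To make the gating concrete, here is a tiny illustration with made-up numbers (not taken from the assignment): a forget-gate entry close to 1 keeps the old cell value, while an entry close to 0 lets the candidate value take over.

import numpy as np

ft = np.array([[0.9], [0.1]])    # forget gate values
it = np.array([[0.2], [0.8]])    # update gate values
cct = np.array([[0.5], [-0.5]])  # candidate values
c_prev = np.array([[1.0], [1.0]])
c_next = ft * c_prev + it * cct  # elementwise gating
print(c_next)  # approx. [[1.0], [-0.3]]: the first entry is mostly kept, the second mostly replaced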
2.1 The LSTM cell
Implementation steps
- Stack a<t-1> and x<t> vertically into a single matrix concat.
- Compute the gate and state formulas above.
- Compute the prediction y<t>.
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
    """
    Arguments:
    xt -- shape (n_x, m)
    a_prev -- shape (n_a, m)
    c_prev -- shape (n_a, m)
    parameters -- python dictionary containing:
        Wf -- shape (n_a, n_a + n_x)
        bf -- shape (n_a, 1)
        Wi -- shape (n_a, n_a + n_x)
        bi -- shape (n_a, 1)
        Wc -- shape (n_a, n_a + n_x)
        bc -- shape (n_a, 1)
        Wo -- shape (n_a, n_a + n_x)
        bo -- shape (n_a, 1)
        Wy -- shape (n_y, n_a)
        by -- shape (n_y, 1)
    Returns:
    a_next -- shape (n_a, m)
    c_next -- shape (n_a, m)
    yt_pred -- shape (n_y, m)
    cache -- tuple of values needed for the backward pass
             (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
    Note: ft / it / ot are the forget, update and output gates,
          cct is the candidate value (c tilde),
          c is the cell ("memory") state.
    """
    # Retrieve the parameters from "parameters"
    Wf = parameters["Wf"]
    bf = parameters["bf"]
    Wi = parameters["Wi"]
    bi = parameters["bi"]
    Wc = parameters["Wc"]
    bc = parameters["bc"]
    Wo = parameters["Wo"]
    bo = parameters["bo"]
    Wy = parameters["Wy"]
    by = parameters["by"]

    # Retrieve dimensions from the shapes of xt and Wy
    n_x, m = xt.shape
    n_y, n_a = Wy.shape

    # Concatenate a_prev and xt into a single matrix
    concat = np.zeros((n_a + n_x, m))
    concat[:n_a, :] = a_prev
    concat[n_a:, :] = xt

    # Compute ft, it, cct, c_next, ot, a_next using the formulas above
    ft = sigmoid(np.dot(Wf, concat) + bf)
    it = sigmoid(np.dot(Wi, concat) + bi)
    cct = np.tanh(np.dot(Wc, concat) + bc)
    c_next = ft * c_prev + it * cct
    ot = sigmoid(np.dot(Wo, concat) + bo)
    a_next = ot * np.tanh(c_next)

    # Compute the prediction of the LSTM cell
    yt_pred = softmax(np.dot(Wy, a_next) + by)

    # Store the values needed for backward propagation in cache
    cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    return a_next, c_next, yt_pred, cache
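A shape check for the LSTM cell, again with arbitrary sizes n_x = 3, n_a = 5, n_y = 2, m = 10 (so every gate weight matrix has shape (5, 5 + 3) = (5, 8)):

np.random.seed(1)
xt = np.random.randn(3, 10)
a_prev = np.random.randn(5, 10)
c_prev = np.random.randn(5, 10)
parameters = {"Wf": np.random.randn(5, 8), "bf": np.random.randn(5, 1),
              "Wi": np.random.randn(5, 8), "bi": np.random.randn(5, 1),
              "Wc": np.random.randn(5, 8), "bc": np.random.randn(5, 1),
              "Wo": np.random.randn(5, 8), "bo": np.random.randn(5, 1),
              "Wy": np.random.randn(2, 5), "by": np.random.randn(2, 1)}
a_next, c_next, yt_pred, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print(a_next.shape, c_next.shape, yt_pred.shape)  # (5, 10) (5, 10) (2, 10)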
2.2 LSTM forward pass
Now that you have implemented a single LSTM step, you can run it over a sequence of Tx inputs with a for loop.
def lstm_forward(x, a0, parameters):
    """
    Arguments:
    x -- input data for every time step, shape (n_x, m, T_x)
    a0 -- initial hidden state, shape (n_a, m)
    parameters -- python dictionary containing:
        Wf -- shape (n_a, n_a + n_x)
        bf -- shape (n_a, 1)
        Wi -- shape (n_a, n_a + n_x)
        bi -- shape (n_a, 1)
        Wc -- shape (n_a, n_a + n_x)
        bc -- shape (n_a, 1)
        Wo -- shape (n_a, n_a + n_x)
        bo -- shape (n_a, 1)
        Wy -- shape (n_y, n_a)
        by -- shape (n_y, 1)
    Returns:
    a -- shape (n_a, m, T_x)
    y -- shape (n_y, m, T_x)
    caches -- tuple of values needed for the backward pass (list of all the caches, x)
    """
    # Initialize "caches", which will contain the list of all caches
    caches = []

    # Retrieve dimensions from the shapes of x and parameters["Wy"]
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wy"].shape

    # Initialize "a", "c" and "y" with zeros
    a = np.zeros((n_a, m, T_x))
    c = np.zeros((n_a, m, T_x))
    y = np.zeros((n_y, m, T_x))

    # Initialize a_next and c_next
    a_next = a0
    c_next = np.zeros((n_a, m))

    # Loop over all time steps
    for t in range(T_x):
        a_next, c_next, yt, cache = lstm_cell_forward(x[:, :, t], a_next, c_next, parameters)
        a[:, :, t] = a_next
        y[:, :, t] = yt
        c[:, :, t] = c_next
        caches.append(cache)

    # Store the values needed for backward propagation in caches
    caches = (caches, x)

    return a, y, c, caches
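And the same kind of check for the full LSTM forward pass, over T_x = 7 arbitrary time steps:

np.random.seed(1)
x = np.random.randn(3, 10, 7)   # (n_x, m, T_x)
a0 = np.random.randn(5, 10)
parameters = {"Wf": np.random.randn(5, 8), "bf": np.random.randn(5, 1),
              "Wi": np.random.randn(5, 8), "bi": np.random.randn(5, 1),
              "Wc": np.random.randn(5, 8), "bc": np.random.randn(5, 1),
              "Wo": np.random.randn(5, 8), "bo": np.random.randn(5, 1),
              "Wy": np.random.randn(2, 5), "by": np.random.randn(2, 1)}
a, y, c, caches = lstm_forward(x, a0, parameters)
print(a.shape, y.shape, c.shape)  # (5, 10, 7) (2, 10, 7) (5, 10, 7)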
3. Backpropagation
This part is implemented with a deep learning framework...
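Purely as intuition for what a framework computes, here is a minimal sketch of the gradient of a single basic RNN cell, reusing the cache stored by rnn_cell_forward; the function name rnn_cell_backward and its return format are illustrative assumptions, and the sketch only propagates the gradient da_next flowing into a_next (it ignores the path through yt_pred):

def rnn_cell_backward(da_next, cache):
    """Illustrative sketch: gradients of one RNN cell given da_next of shape (n_a, m)."""
    (a_next, a_prev, xt, parameters) = cache
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]

    # a_next = tanh(z), so dz = (1 - a_next**2) * da_next
    dtanh = (1 - a_next ** 2) * da_next

    dxt = np.dot(Wax.T, dtanh)                  # gradient w.r.t. the input x<t>
    dWax = np.dot(dtanh, xt.T)                  # gradient w.r.t. Wax
    da_prev = np.dot(Waa.T, dtanh)              # gradient w.r.t. the previous hidden state
    dWaa = np.dot(dtanh, a_prev.T)              # gradient w.r.t. Waa
    dba = np.sum(dtanh, axis=1, keepdims=True)  # gradient w.r.t. ba (summed over the batch)

    return {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}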
Notes
The code of the file rnn_utils is as follows:
import numpy as np

def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def initialize_adam(parameters):
    """
    Initializes v and s as two python dictionaries with:
        - keys: "dW1", "db1", ..., "dWL", "dbL"
        - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    Arguments:
    parameters -- python dictionary containing your parameters.
                  parameters["W" + str(l)] = Wl
                  parameters["b" + str(l)] = bl
    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
         v["dW" + str(l)] = ...
         v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
         s["dW" + str(l)] = ...
         s["db" + str(l)] = ...
    """
    L = len(parameters) // 2  # number of layers in the neural network
    v = {}
    s = {}

    # Initialize v, s. Input: "parameters". Outputs: "v, s".
    for l in range(L):
        v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
        v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
        s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
        s["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)

    return v, s
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate=0.01,
                                beta1=0.9, beta2=0.999, epsilon=1e-8):
    """
    Update parameters using Adam
    Arguments:
    parameters -- python dictionary containing your parameters:
                  parameters['W' + str(l)] = Wl
                  parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameter:
             grads['dW' + str(l)] = dWl
             grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam update counter (starting at 1), used for bias correction
    learning_rate -- the learning rate, scalar
    beta1 -- exponential decay hyperparameter for the first moment estimates
    beta2 -- exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in the Adam updates
    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    L = len(parameters) // 2  # number of layers in the neural network
    v_corrected = {}          # bias-corrected first moment estimate
    s_corrected = {}          # bias-corrected second moment estimate

    # Perform the Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients
        v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads["dW" + str(l + 1)]
        v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads["db" + str(l + 1)]

        # Compute the bias-corrected first moment estimate
        v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - beta1 ** t)
        v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - beta1 ** t)

        # Moving average of the squared gradients
        s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * (grads["dW" + str(l + 1)] ** 2)
        s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * (grads["db" + str(l + 1)] ** 2)

        # Compute the bias-corrected second raw moment estimate
        s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - beta2 ** t)
        s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - beta2 ** t)

        # Update the parameters
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / np.sqrt(s_corrected["dW" + str(l + 1)] + epsilon)
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / np.sqrt(s_corrected["db" + str(l + 1)] + epsilon)

    return parameters, v, s
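A hypothetical usage of the two Adam helpers above on a toy two-layer parameter set (the shapes and gradient values are made up purely for illustration):

np.random.seed(1)
parameters = {"W1": np.random.randn(4, 3), "b1": np.zeros((4, 1)),
              "W2": np.random.randn(1, 4), "b2": np.zeros((1, 1))}
grads = {"dW1": np.random.randn(4, 3), "db1": np.random.randn(4, 1),
         "dW2": np.random.randn(1, 4), "db2": np.random.randn(1, 1)}
v, s = initialize_adam(parameters)
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t=1)
print(v["dW1"].shape, s["dW1"].shape)  # (4, 3) (4, 3)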