DQN Family of Algorithms

1. Double DQN


(As an aside, combining the various DQN improvements into a single agent is what yields Rainbow DQN.)

DQN tends to overestimate: its Q-value estimates are systematically too high.

As the game goes on, the estimated Q values keep growing, which is bad for training. Comparing the DQN and Double DQN targets below, the only difference is the introduction of Q_{\phi_B}: the B (target) network evaluates the action chosen by the A network, which suppresses the overestimation.

  • DQN target value: y_j = r_j + \gamma \max_{\mathbf{a}'_j} Q_{\phi'}(\mathbf{s}'_j, \mathbf{a}'_j)
  • Double DQN target value: Q_{\phi_A}(\mathbf{s}, \mathbf{a}) \leftarrow r + \gamma Q_{\phi_B}\Big(\mathbf{s}', \arg\max_{\mathbf{a}'} Q_{\phi_A}(\mathbf{s}', \mathbf{a}')\Big)
    As for why the model produces these overestimates in the first place, the figures below explain it clearly:
    [Figure: why overestimation arises]

    [Figure: Double DQN]
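To see the same effect numerically, here is a minimal, self-contained sketch (an illustrative example of mine, not from the referenced article). All true Q values are zero, so any positive estimate is pure overestimation: taking the max over one noisy estimate is biased upward, while the Double DQN trick of selecting the action with network A and evaluating it with network B is not.

import numpy as np

rng = np.random.default_rng(0)
true_q = np.zeros(10)  # ten actions, all truly worth 0
n_trials = 10000

dqn_style, ddqn_style = [], []
for _ in range(n_trials):
    # two independent noisy estimates of the same Q values
    q_a = true_q + rng.normal(0, 1, size=10)
    q_b = true_q + rng.normal(0, 1, size=10)
    # DQN-style target: max over a single noisy estimate -> biased upward
    dqn_style.append(q_a.max())
    # Double-DQN-style target: select with q_a, evaluate with q_b -> roughly unbiased
    ddqn_style.append(q_b[q_a.argmax()])

print(f"max over one estimate         : {np.mean(dqn_style):+.3f}")   # clearly positive
print(f"select with A, evaluate with B: {np.mean(ddqn_style):+.3f}")  # close to zero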

The full code is shown below (source: 【强化学习】双深度Q网络(DDQN)求解倒立摆问题 + Pytorch代码实战):

import argparse
import datetime
import time
import math
import copy  # needed so the target network is an independent copy of the policy network
import torch.optim as optim
import gym
from torch import nn

# Change this to the path of your own RL_Utils.py file
from Python.ReinforcementLearning.EasyRL.RL_Utils import *


# Q network (a 3-layer fully connected network)
class MLP(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim=128):
        """ 初始化q网络,为全连接网络
            input_dim: 输入的特征数即环境的状态维度
            output_dim: 输出的动作维度
        """
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)  # input layer
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # hidden layer
        self.fc3 = nn.Linear(hidden_dim, output_dim)  # output layer

    def forward(self, x):
        # Activation functions between the layers
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)


# Experience replay buffer
class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # capacity of the replay buffer
        self.buffer = []  # storage
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        ''' The buffer acts as a queue: once capacity is exceeded, the oldest transitions are overwritten
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done

    def __len__(self):
        ''' Return the number of transitions currently stored
        '''
        return len(self.buffer)


# DDQN agent
class DDQN:
    def __init__(self, model, memory, cfg):

        self.n_actions = cfg['n_actions']
        self.device = torch.device(cfg['device'])
        self.gamma = cfg['gamma']
        ## epsilon-greedy exploration parameters
        self.sample_count = 0  # number of sampled actions so far
        self.epsilon = cfg['epsilon_start']
        self.epsilon_start = cfg['epsilon_start']
        self.epsilon_end = cfg['epsilon_end']
        self.epsilon_decay = cfg['epsilon_decay']
        self.batch_size = cfg['batch_size']
        self.policy_net = model.to(self.device)
        # The target network must be an independent copy of the model; otherwise
        # policy_net and target_net would literally share the same parameters
        self.target_net = copy.deepcopy(model).to(self.device)
        # At initialization the target network equals the policy network:
        # copy the policy network's parameters into the target network
        self.target_net.load_state_dict(self.policy_net.state_dict())

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg['lr'])
        self.memory = memory
        self.update_flag = False

    # Sampling during training: epsilon-greedy policy
    def sample_action(self, state):
        self.sample_count += 1
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
                       math.exp(-1. * self.sample_count / self.epsilon_decay)
        if random.random() > self.epsilon:
            return self.predict_action(state)
        else:
            action = random.randrange(self.n_actions)
        return action

    # During testing: pick the action with the largest Q value
    def predict_action(self, state):
        with torch.no_grad():
            state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
            q_values = self.policy_net(state)
            action = q_values.max(1)[1].item()
        return action

    def update(self):
        # Do not update until the replay buffer holds at least one batch of samples
        if len(self.memory) < self.batch_size:
            return
        else:
            if not self.update_flag:
                print("Begin to update!")
                self.update_flag = True
        # Randomly sample a batch of data from the replay buffer
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        # Convert the data to tensors
        state_batch = torch.tensor(np.array(state_batch), device=self.device,
                                   dtype=torch.float)  # shape(batchsize,n_states)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)  # shape(batchsize,1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float).unsqueeze(
            1)  # shape(batchsize,1)
        next_state_batch = torch.tensor(np.array(next_state_batch), device=self.device,
                                        dtype=torch.float)  # shape(batchsize,n_states)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device).unsqueeze(1)  # shape(batchsize,1)
        # Compute the predicted Q values
        q_value_batch = self.policy_net(state_batch).gather(dim=1,
                                                            index=action_batch)  # shape(batchsize,1),requires_grad=True
        # This is the key difference between DDQN and DQN: DDQN first uses policy_net to select the greedy action, then uses target_net to evaluate that action's Q value
        # next_max_q_value_batch = self.policy_net(next_state_batch).max(1)[0].detach().unsqueeze(1)

        next_q_value_batch = self.policy_net(next_state_batch)
        next_target_value_batch = self.target_net(next_state_batch)  # type = Tensor , shape([batch_size, n_actions])

        # gather returns the values at the positions given by the index argument; dim=1 indexes along columns, dim=0 along rows
        # unsqueeze adds a dimension, e.g. torch.Size([6]): tensor([0, 1, 2, 3, 4, 5]).unsqueeze(0) => torch.Size([1, 6])
        # torch.max(tensor, dim) returns the maximum along the given dimension together with the index of each maximum
        # .detach() returns a tensor that is detached from the computation graph, so no gradient flows back through it
        next_max_q_value_batch = next_target_value_batch.gather(1, torch.max(next_q_value_batch, 1)[1].unsqueeze(1))

        # Compute the target Q values
        expected_q_value_batch = reward_batch + self.gamma * next_max_q_value_batch * (1 - done_batch)
        # Loss: MSE(predicted Q, target Q)
        loss = nn.MSELoss()(q_value_batch, expected_q_value_batch)
        # Gradient descent step
        self.optimizer.zero_grad()
        loss.backward()
        # Clip the gradients to avoid exploding gradients
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1.0, 1.0)
        self.optimizer.step()

    def save_model(self, path):
        Path(path).mkdir(parents=True, exist_ok=True)
        torch.save(self.target_net.state_dict(), f"{path}/checkpoint.pt")

    def load_model(self, path):
        self.target_net.load_state_dict(torch.load(f"{path}/checkpoint.pt"))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)


# Training function
def train(arg_dict, env, agent):
    # Start the timer
    startTime = time.time()
    print(f"Environment: {arg_dict['env_name']}, Algorithm: {arg_dict['algo_name']}, Device: {arg_dict['device']}")
    print("Start training the agent......")
    rewards = []
    steps = []
    for i_ep in range(arg_dict["train_eps"]):
        ep_reward = 0
        ep_step = 0
        state = env.reset()
        for _ in range(arg_dict['ep_max_steps']):
            # Render the environment
            if arg_dict['train_render']:
                env.render()
            ep_step += 1
            action = agent.sample_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.memory.push(state, action, reward,
                              next_state, done)
            state = next_state
            agent.update()
            ep_reward += reward
            if done:
                break
        # Update the target network periodically
        if (i_ep + 1) % arg_dict["target_update"] == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        steps.append(ep_step)
        rewards.append(ep_reward)
        if (i_ep + 1) % 10 == 0:
            print(f'Episode: {i_ep + 1}/{arg_dict["train_eps"]}, Reward: {ep_reward:.2f}, Epsilon: {agent.epsilon:.3f}')
    print('Training finished, elapsed time: ' + str(time.time() - startTime) + " s")
    # Close the environment
    env.close()
    return {'episodes': range(len(rewards)), 'rewards': rewards}


# Test function
def test(arg_dict, env, agent):
    startTime = time.time()
    print("开始测试智能体......")
    print(f"环境名: {arg_dict['env_name']}, 算法名: {arg_dict['algo_name']}, Device: {arg_dict['device']}")
    rewards = []
    steps = []
    for i_ep in range(arg_dict['test_eps']):
        ep_reward = 0
        ep_step = 0
        state = env.reset()
        for _ in range(arg_dict['ep_max_steps']):
            # Render the environment
            if arg_dict['test_render']:
                env.render()
            ep_step += 1
            action = agent.predict_action(state)
            next_state, reward, done, _ = env.step(action)
            state = next_state
            ep_reward += reward
            if done:
                break
        steps.append(ep_step)
        rewards.append(ep_reward)
        print(f"Episode: {i_ep + 1}/{arg_dict['test_eps']},Reward: {ep_reward:.2f}")
    print("测试结束 , 用时: " + str(time.time() - startTime) + " s")
    env.close()
    return {'episodes': range(len(rewards)), 'rewards': rewards}


# Create the environment and the agent
def create_env_agent(arg_dict):
    # Create the environment
    env = gym.make(arg_dict['env_name'])
    # Set the random seed
    all_seed(env, seed=arg_dict["seed"])
    # Get the state dimension
    try:
        n_states = env.observation_space.n
    except AttributeError:
        n_states = env.observation_space.shape[0]
    # Get the number of actions
    n_actions = env.action_space.n
    print(f"State dimension: {n_states}, number of actions: {n_actions}")
    # Add the state dimension and number of actions to the parameter dictionary
    arg_dict.update({"n_states": n_states, "n_actions": n_actions})
    # Instantiate the agent
    # Q network model
    model = MLP(n_states, n_actions, hidden_dim=arg_dict["hidden_dim"])
    # Replay buffer
    memory = ReplayBuffer(arg_dict["memory_capacity"])
    # Agent
    agent = DDQN(model, memory, arg_dict)
    # Return the environment and the agent
    return env, agent


if __name__ == '__main__':
    # Avoid the error: OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # Get the current path
    curr_path = os.path.dirname(os.path.abspath(__file__))
    # Get the current time
    curr_time = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    # Hyperparameter settings
    parser = argparse.ArgumentParser(description="hyper parameters")
    parser.add_argument('--algo_name', default='DDQN', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='CartPole-v0', type=str, help="name of environment")
    parser.add_argument('--train_eps', default=200, type=int, help="episodes of training")
    parser.add_argument('--test_eps', default=20, type=int, help="episodes of testing")
    parser.add_argument('--ep_max_steps', default=100000, type=int,
                        help="steps per episode, much larger value can simulate infinite steps")
    parser.add_argument('--gamma', default=0.95, type=float, help="discounted factor")
    parser.add_argument('--epsilon_start', default=0.95, type=float, help="initial value of epsilon")
    parser.add_argument('--epsilon_end', default=0.01, type=float, help="final value of epsilon")
    parser.add_argument('--epsilon_decay', default=500, type=int,
                        help="decay rate of epsilon, the higher value, the slower decay")
    parser.add_argument('--lr', default=0.0001, type=float, help="learning rate")
    parser.add_argument('--memory_capacity', default=100000, type=int, help="memory capacity")
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--target_update', default=4, type=int)
    parser.add_argument('--hidden_dim', default=256, type=int)
    parser.add_argument('--device', default='cpu', type=str, help="cpu or cuda")
    parser.add_argument('--seed', default=520, type=int, help="seed")
    parser.add_argument('--show_fig', default=False, type=bool, help="if show figure or not")
    parser.add_argument('--save_fig', default=True, type=bool, help="if save figure or not")
    parser.add_argument('--train_render', default=False, type=bool,
                        help="Whether to render the environment during training")
    parser.add_argument('--test_render', default=True, type=bool,
                        help="Whether to render the environment during testing")
    args = parser.parse_args()
    default_args = {'result_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/results/",
                    'model_path': f"{curr_path}/outputs/{args.env_name}/{curr_time}/models/",
                    }
    # Convert the parsed arguments to a dict
    arg_dict = {**vars(args), **default_args}
    print("Algorithm parameter dictionary:", arg_dict)

    # Create the environment and the agent
    env, agent = create_env_agent(arg_dict)
    # Pass in the parameters, environment, and agent, then start training
    res_dic = train(arg_dict, env, agent)
    print("Result dictionary returned by the algorithm:", res_dic)
    # Save related information
    agent.save_model(path=arg_dict['model_path'])
    save_args(arg_dict, path=arg_dict['result_path'])
    save_results(res_dic, tag='train', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="train")

    # =================================================================================================
    # Create a fresh environment and agent for testing
    print("=" * 300)
    env, agent = create_env_agent(arg_dict)
    # Load the saved agent
    agent.load_model(path=arg_dict['model_path'])
    res_dic = test(arg_dict, env, agent)
    save_results(res_dic, tag='test', path=arg_dict['result_path'])
    plot_rewards(res_dic['rewards'], arg_dict, path=arg_dict['result_path'], tag="test")

2. Dueling DQN


The figure above shows that a plain w*x mapping can only fit each (state, action) value one by one; it cannot generalize from one to another. The idea is that a single shared offset that influences all of these outputs at once would help, i.e. one "bias" term is enough. In Dueling DQN this shared term is the state value V(s), which acts as the global control, while a per-action advantage A(s, a) captures the differences between actions.
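Written out, with the mean advantage subtracted so that V and A stay identifiable:

Q(\mathbf{s}, \mathbf{a}) = V(\mathbf{s}) + A(\mathbf{s}, \mathbf{a}) - \frac{1}{|\mathcal{A}|} \sum_{\mathbf{a}'} A(\mathbf{s}, \mathbf{a}')

This is exactly what the forward pass of the VAnet class in the code below computes.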







The code is shown below (source: 强化学习代码实战-06 Dueling DQN 算法):

import random
import gym
import torch
import numpy as np
from matplotlib import pyplot as plt
from IPython import display

env = gym.make("Pendulum-v0")
# Initial state of the agent
state = env.reset()
# Action space
actions = env.action_space
print(state, actions)
# Render the game frame (optional)
# plt.imshow(env.render(mode='rgb_array'))
# plt.show()


"""重新定义策略价值网络Q, 比DQN性能更优"""
class VAnet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        
        self.fc = torch.nn.Sequential(torch.nn.Linear(3, 128),
                                     torch.nn.ReLU())
        self.fc_A = torch.nn.Linear(128, 11)
        self.fc_V = torch.nn.Linear(128, 1)
        
    def forward(self, x):
        A = self.fc_A(self.fc(x))
        V = self.fc_V(self.fc(x))
        A_mean = A.mean(dim=1).reshape(-1, 1)
        A = A - A_mean  # center the advantages so the decomposition is identifiable
        # Q is the sum of the state value V and the centered advantage A
        Q = A + V
        
        return Q
    
    
# Action model (policy network)
model = VAnet()

# Target network, used to evaluate the value of an action
next_model = VAnet()
# Copy model's parameters into next_model
next_model.load_state_dict(model.state_dict())

# Get an action
def get_action(state):
    """state: the agent's current state. Pendulum's action is continuous, so it is discretized into 11 bins."""
    # Run the state through the network and take the action with the highest score; convert to a tensor first
    state = torch.FloatTensor(state).reshape(1, 3)
    action = model(state).argmax().item()
    if random.random() < 0.01:
        action = random.choice(range(11))
    # Map the discrete action index 0..10 back to a continuous torque in [-2, 2]
    action_continuous = action
    action_continuous /= 10
    action_continuous *= 4
    action_continuous -= 2
    
    return action, action_continuous


# Data pool (replay buffer)
datas = []
def update_data():
    """Append roughly 200 new transitions, then drop the oldest ones if the pool grows too large"""
    count = len(datas)
    while len(datas) - count < 200:
        # Keep collecting data to cover as many environment states as possible
        state = env.reset()
        done = False
        while not done:
            # Get an action from the current state
            action, action_continuous = get_action(state)
            next_state, reward, done, _ = env.step([action_continuous])
            datas.append((state, action, reward, next_state, done))
            # Update the state
            state = next_state
    # Roughly 200 new samples have now been added; if the pool exceeds its maximum capacity, drop the oldest data
    update_count = len(datas) - count
    while len(datas) > 5000:
        datas.pop(0)
    return update_count

# Sample from the data pool
def get_sample():
    # batch size = 64; convert the data to tensors
    samples = random.sample(datas, 64)
    state = torch.FloatTensor([i[0] for i in samples]).reshape(-1, 3)
    action = torch.LongTensor([i[1] for i in samples]).reshape(-1, 1)
    reward = torch.FloatTensor([i[2] for i in samples]).reshape(-1, 1)
    next_state = torch.FloatTensor([i[3] for i in samples]).reshape(-1, 3)
    done = torch.LongTensor([i[4] for i in samples]).reshape(-1, 1)
    
    return state, action, reward, next_state, done

# Get the Q value of the chosen actions
def get_value(state, action):
    """Look up the score of the taken action from the network output; uses the policy network"""
    action_value = model(state)
    action_value = action_value.gather(dim=1, index=action)
    
    return action_value

# Get the learning target
def get_target(next_state, reward, done):
    """Compute the target from next_state and reward; the value estimate uses the target network"""
    with torch.no_grad():
        target = next_model(next_state)
        
    target = target.max(dim=1)[0].reshape(-1, 1)
    target *= (1 - done)        # terminal states contribute no bootstrapped value
    
    target = reward + target * 0.98
    
    return target

# Play one test episode and return its total reward
def test():
    reward_sum = 0
    
    state = env.reset()
    done = False
    
    while not done:
        _, action_continuous = get_action(state)
        next_state, reward, done, _ = env.step([action_continuous])
        reward_sum += reward
        state = next_state
        
    return reward_sum

def train():
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-3)
    loss_fn = torch.nn.MSELoss()
    
    for epoch in range(600):
        # Collect a new batch of data
        update_counter = update_data()

        # After collecting data, perform 200 learning steps
        for i in range(200):
            state, action, reward, next_state, done = get_sample()
            # Compute the predicted value and the target
            value = get_value(state, action)
            target = get_target(next_state, reward, done)

            # Parameter update
            loss = loss_fn(value, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            """Periodically update the target network"""
            if (i + 1) % 10 == 0:
                next_model.load_state_dict(model.state_dict())
            
        if epoch % 50 == 0:
            test_score = sum([test() for i in range(50)]) / 50
            print(epoch, len(datas), update_counter, test_score)

3. Multi-step DQN


Multi-step DQN simply uses the rewards from several consecutive time steps when computing the Q target, instead of bootstrapping after a single step (a sketch follows below).
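Concretely, the one-step target is replaced by an n-step return:

y_t = r_t + \gamma r_{t+1} + \dots + \gamma^{n-1} r_{t+n-1} + \gamma^{n} \max_{\mathbf{a}'} Q_{\phi'}(\mathbf{s}_{t+n}, \mathbf{a}')

Below is a minimal sketch of how this could be bolted onto the DDQN code above; the helper is my own illustration and not part of the original code, and in update() the discount on the bootstrapped term would become gamma ** n:

from collections import deque

def make_n_step_transition(window, gamma):
    """Collapse a deque of n consecutive (state, action, reward, next_state, done)
    tuples into a single n-step transition."""
    state, action = window[0][0], window[0][1]
    reward, next_state, done = 0.0, window[-1][3], window[-1][4]
    for idx, (_, _, r, s_next, d) in enumerate(window):
        reward += (gamma ** idx) * r
        if d:  # the episode ended inside the window: stop accumulating
            next_state, done = s_next, True
            break
    return state, action, reward, next_state, done

# Usage sketch: keep window = deque(maxlen=n) of the most recent transitions and,
# once it is full, push make_n_step_transition(window, gamma) into the ReplayBuffer.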


4. Handling continuous actions


