# Double Q-learning in Practice: Solving the Overestimation Problem in Python (with Code Examples)
Q-learning is one of the most popular reinforcement learning algorithms thanks to its simplicity and efficiency, but in some settings it suffers from severe overestimation. This article examines the root of that problem and walks through a Python implementation of Double Q-learning, using side-by-side code to show how it effectively mitigates overestimation.

## 1. The Overestimation Problem in Q-learning: Theory and Symptoms

In standard Q-learning, the Q-value is updated with

    Q(s, a) ← Q(s, a) + α · [r + γ · max_a' Q(s', a') − Q(s, a)]

The max operator is precisely the source of overestimation. Suppose a state has two actions a1 and a2 with true Q-values of 1.0 and 0.9. Because of sampling error, the estimates might come out as 1.2 and 0.8. The max then picks 1.2, so the update uses a value larger than the true maximum. This effect is especially pronounced in complex environments such as Atari games.

In his 2010 paper, van Hasselt showed mathematically that Q-learning's overestimation bias can be expressed as

    E[max_a Q(s, a)] ≥ max_a E[Q(s, a)]

To build intuition, let's simulate a simple overestimation scenario:

```python
import numpy as np

# True Q-values
true_q = np.array([1.0, 0.9, 0.8])

# Estimated Q-values with noise
estimated_q = true_q + np.random.normal(0, 0.2, size=3)

print(f"True maximum:      {max(true_q):.2f}")
print(f"Estimated maximum: {max(estimated_q):.2f}")
print(f"Overestimation:    {max(estimated_q) - max(true_q):.2f}")
```

Run this a few times and you will see that the estimated maximum frequently exceeds the true maximum. The bias propagates through the Bellman equation and destabilizes the entire learning process.

## 2. How Double Q-learning Works

The core idea of Double Q-learning is to maintain two independent Q-functions (Q_A and Q_B) that decouple action selection from value evaluation:

1. Use Q_A to select the best action for the next state: a* = argmax_a Q_A(s', a)
2. Use Q_B to evaluate that action: Q_B(s', a*)
3. Update Q_A: Q_A(s, a) ← Q_A(s, a) + α · [r + γ · Q_B(s', a*) − Q_A(s, a)]
4. Update Q_B with the symmetric rule, alternating (or randomly choosing) which table is updated.

Because of this decoupling, even if one Q-function overestimates some action, the update is not badly affected as long as the other Q-function does not overestimate it in the same way. Mathematically, Double Q-learning can be shown to produce a slight underestimation, but compared with Q-learning's overestimation this bias is usually much easier to control:

    E[Q_B(s, argmax_a Q_A(s, a))] ≤ max_a E[Q(s, a)]
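To make the two inequalities concrete, here is a minimal Monte-Carlo sketch extending the simulation above (the variable names, seed, and 0.2 noise level are illustrative assumptions, not from the original article). It compares the single estimator max_a Q(s, a) with the double estimator Q_B(s, argmax_a Q_A(s, a)) when both tables are independent noisy samples around the same true values:

```python
import numpy as np

# True Q-values for three actions; the true maximum is 1.0
true_q = np.array([1.0, 0.9, 0.8])
n_trials = 10_000
single_estimates, double_estimates = [], []

rng = np.random.default_rng(0)
for _ in range(n_trials):
    # Two independent noisy estimates of the same true values
    q_a = true_q + rng.normal(0, 0.2, size=3)
    q_b = true_q + rng.normal(0, 0.2, size=3)
    # Single estimator: select AND evaluate with the same noisy table
    single_estimates.append(np.max(q_a))
    # Double estimator: select with Q_A, evaluate with Q_B
    double_estimates.append(q_b[np.argmax(q_a)])

print(f"true maximum:            {true_q.max():.3f}")
print(f"single estimator (mean): {np.mean(single_estimates):.3f}")  # typically above 1.0
print(f"double estimator (mean): {np.mean(double_estimates):.3f}")  # at or just below 1.0
```

Averaged over many trials, the single estimator lands above the true maximum while the double estimator lands at or slightly below it, matching the over- and underestimation bounds quoted above.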
## 3. Python Implementation and a Comparison Experiment

We use a simple GridWorld environment to compare standard Q-learning against Double Q-learning.

### 3.1 Environment Setup

```python
import numpy as np

class GridWorld:
    def __init__(self):
        self.size = 5
        self.goal = (4, 4)
        self.trap = (2, 2)
        self.reset()

    def reset(self):
        self.state = (0, 0)
        return self.state

    def step(self, action):
        x, y = self.state
        if action == 0:    # up
            x = max(0, x - 1)
        elif action == 1:  # right
            y = min(self.size - 1, y + 1)
        elif action == 2:  # down
            x = min(self.size - 1, x + 1)
        elif action == 3:  # left
            y = max(0, y - 1)
        self.state = (x, y)
        if self.state == self.goal:
            return self.state, 10, True
        elif self.state == self.trap:
            return self.state, -10, True
        else:
            return self.state, -1, False
```

### 3.2 Q-learning Implementation

```python
class QLearningAgent:
    def __init__(self, n_states, n_actions, alpha=0.1, gamma=0.95, epsilon=0.1):
        self.q_table = np.zeros((n_states, n_actions))
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon

    def choose_action(self, state):
        if np.random.random() < self.epsilon:
            return np.random.randint(0, len(self.q_table[state]))
        return np.argmax(self.q_table[state])

    def learn(self, state, action, reward, next_state, done):
        predict = self.q_table[state][action]
        target = reward if done else reward + self.gamma * np.max(self.q_table[next_state])
        self.q_table[state][action] += self.alpha * (target - predict)
```

### 3.3 Double Q-learning Implementation

```python
class DoubleQLearningAgent:
    def __init__(self, n_states, n_actions, alpha=0.1, gamma=0.95, epsilon=0.1):
        self.q_a = np.zeros((n_states, n_actions))
        self.q_b = np.zeros((n_states, n_actions))
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon

    def choose_action(self, state):
        if np.random.random() < self.epsilon:
            return np.random.randint(0, len(self.q_a[state]))
        # Act greedily with respect to the sum of both tables
        return np.argmax(self.q_a[state] + self.q_b[state])

    def learn(self, state, action, reward, next_state, done):
        # Randomly choose whether to update Q_A or Q_B
        if np.random.random() < 0.5:
            q_predict = self.q_a[state][action]
            if done:
                q_target = reward
            else:
                best_action = np.argmax(self.q_a[next_state])
                q_target = reward + self.gamma * self.q_b[next_state][best_action]
            self.q_a[state][action] += self.alpha * (q_target - q_predict)
        else:
            q_predict = self.q_b[state][action]
            if done:
                q_target = reward
            else:
                best_action = np.argmax(self.q_b[next_state])
                q_target = reward + self.gamma * self.q_a[next_state][best_action]
            self.q_b[state][action] += self.alpha * (q_target - q_predict)
```

### 3.4 Training and Results Comparison

```python
def train_agent(env, agent, episodes=1000):
    rewards = []
    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        done = False
        while not done:
            # Flatten the (x, y) grid position into a single state index
            action = agent.choose_action(state[0] * env.size + state[1])
            next_state, reward, done = env.step(action)
            agent.learn(state[0] * env.size + state[1], action, reward,
                        next_state[0] * env.size + next_state[1], done)
            state = next_state
            total_reward += reward
        rewards.append(total_reward)
    return rewards

# Train and compare the two algorithms
env = GridWorld()
q_agent = QLearningAgent(25, 4)
double_q_agent = DoubleQLearningAgent(25, 4)
q_rewards = train_agent(env, q_agent)
double_q_rewards = train_agent(env, double_q_agent)
```

Plotting the reward curves of the two algorithms shows clearly that Double Q-learning is more stable and reaches better final performance:

```python
import matplotlib.pyplot as plt

plt.plot(np.convolve(q_rewards, np.ones(100) / 100, mode='valid'), label='Q-learning')
plt.plot(np.convolve(double_q_rewards, np.ones(100) / 100, mode='valid'), label='Double Q-learning')
plt.xlabel('Episode')
plt.ylabel('Average Reward')
plt.legend()
plt.show()
```

## 4. Advanced Techniques and Hyperparameter Tuning

### 4.1 Learning-Rate Scheduling

Adjusting the learning rate dynamically can improve performance:

```python
class DoubleQLearningAgent:
    def __init__(self, n_states, n_actions, alpha=0.1, gamma=0.95, epsilon=0.1):
        # ... other initialization ...
        self.alpha_decay = 0.9995
        self.min_alpha = 0.01

    def learn(self, state, action, reward, next_state, done):
        self.alpha = max(self.min_alpha, self.alpha * self.alpha_decay)
        # ... rest of the learning logic ...
```

### 4.2 Tuning the Exploration Strategy

The ε-greedy strategy can decay over time:

```python
class DoubleQLearningAgent:
    def __init__(self, n_states, n_actions, alpha=0.1, gamma=0.95, epsilon=1.0):
        # ... other initialization ...
        self.epsilon_decay = 0.995
        self.min_epsilon = 0.01

    def choose_action(self, state):
        self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)
        # ... rest of the action-selection logic ...
```

### 4.3 Integrating Experience Replay

Tabular Double Q-learning traditionally does not use experience replay, but the two can be combined:

```python
from collections import deque
import random

class ReplayBuffer:
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)


class DoubleQLearningWithReplay(DoubleQLearningAgent):
    def __init__(self, n_states, n_actions, batch_size=32, buffer_size=10000, **kwargs):
        super().__init__(n_states, n_actions, **kwargs)
        self.buffer = ReplayBuffer(buffer_size)
        self.batch_size = batch_size

    def learn(self, state, action, reward, next_state, done):
        self.buffer.push(state, action, reward, next_state, done)
        if len(self.buffer) >= self.batch_size:
            batch = self.buffer.sample(self.batch_size)
            for s, a, r, ns, d in batch:
                super().learn(s, a, r, ns, d)
```
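The replay-augmented agent keeps the same `choose_action`/`learn` interface, so it can be dropped straight into the `train_agent` loop from Section 3.4. The snippet below is only a sketch of that wiring (the sizes 25 and 4 simply match the GridWorld above; the batch and buffer sizes are the defaults, not tuned values):

```python
# Reuse GridWorld, train_agent, and the matplotlib/numpy imports from Section 3
env = GridWorld()
replay_agent = DoubleQLearningWithReplay(25, 4, batch_size=32, buffer_size=10000)
replay_rewards = train_agent(env, replay_agent, episodes=1000)

# Smoothed learning curve for the replay-augmented agent
plt.plot(np.convolve(replay_rewards, np.ones(100) / 100, mode='valid'),
         label='Double Q-learning + replay')
plt.xlabel('Episode')
plt.ylabel('Average Reward')
plt.legend()
plt.show()
```

Note that each `learn` call now replays an entire batch, so the per-step cost grows roughly with `batch_size`; this is negligible in the tiny GridWorld but worth watching in larger problems.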
## 5. Practical Considerations

- **Network architecture**: when combining Double Q-learning with deep neural networks, the two estimators can share part of their parameters to reduce computation.
- **Target networks**: periodically refreshing target networks further improves stability.
- **Bias-variance trade-off**: Double Q-learning removes the overestimation bias but can introduce a slight underestimation bias, so the two need to be balanced.
- **Multi-step learning**: combining n-step returns can speed up learning but may introduce additional bias.

Below is a complete implementation that combines the improvements above:

```python
class AdvancedDoubleQLearning:
    def __init__(self, n_states, n_actions, alpha=0.1, gamma=0.95, epsilon=1.0):
        self.q_a = np.random.uniform(-0.1, 0.1, (n_states, n_actions))
        self.q_b = np.random.uniform(-0.1, 0.1, (n_states, n_actions))
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        self.alpha_decay = 0.9995
        self.min_alpha = 0.01
        self.epsilon_decay = 0.995
        self.min_epsilon = 0.01
        self.target_update_freq = 100
        self.steps = 0
        self.q_a_target = self.q_a.copy()
        self.q_b_target = self.q_b.copy()

    def choose_action(self, state):
        self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)
        if np.random.random() < self.epsilon:
            return np.random.randint(0, len(self.q_a[state]))
        return np.argmax(self.q_a[state] + self.q_b[state])

    def learn(self, state, action, reward, next_state, done):
        self.steps += 1
        self.alpha = max(self.min_alpha, self.alpha * self.alpha_decay)
        # Randomly choose whether to update Q_A or Q_B
        if np.random.random() < 0.5:
            q_predict = self.q_a[state][action]
            if done:
                q_target = reward
            else:
                best_action = np.argmax(self.q_a_target[next_state])
                q_target = reward + self.gamma * self.q_b_target[next_state][best_action]
            self.q_a[state][action] += self.alpha * (q_target - q_predict)
        else:
            q_predict = self.q_b[state][action]
            if done:
                q_target = reward
            else:
                best_action = np.argmax(self.q_b_target[next_state])
                q_target = reward + self.gamma * self.q_a_target[next_state][best_action]
            self.q_b[state][action] += self.alpha * (q_target - q_predict)
        # Periodically refresh the target tables
        if self.steps % self.target_update_freq == 0:
            self.q_a_target = self.q_a.copy()
            self.q_b_target = self.q_b.copy()
```

In real projects I have found that starting ε at 1.0 and decaying it slowly, combined with a dynamically adjusted learning rate, gives better results than fixed parameters. The target-network update frequency also needs to be tuned to the problem at hand: for more stable environments it can be updated less often, while rapidly changing environments call for more frequent updates.
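As a closing illustration, here is a minimal sketch of how that advice maps onto the class above, reusing the GridWorld and `train_agent` helper from Section 3; the update frequency of 100 is simply the class default, not a recommendation:

```python
env = GridWorld()
advanced_agent = AdvancedDoubleQLearning(25, 4, alpha=0.1, gamma=0.95, epsilon=1.0)
# For rapidly changing environments, try a smaller interval (e.g. 20-50);
# for very stable ones, a larger interval reduces overhead.
advanced_agent.target_update_freq = 100

advanced_rewards = train_agent(env, advanced_agent, episodes=1000)
print(f"average reward over the last 100 episodes: {np.mean(advanced_rewards[-100:]):.2f}")
```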