Deep Reinforcement Learning in Python
Timothée Carayol
Principal Machine Learning Engineer, Komment
select_action()
import math
import random
import torch

def select_action(q_values, step, start, end, decay):
    # Calculate the epsilon threshold for this step
    epsilon = end + (start - end) * math.exp(-step / decay)
    # Draw a random number between 0 and 1
    sample = random.random()
    if sample < epsilon:
        # Return a random action index
        return random.choice(range(len(q_values)))
    # Return the action index with the highest Q-value
    return torch.argmax(q_values).item()
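As a quick usage sketch (the Q-values below are made up for illustration; in the training loop they would come from online_network):

# Hypothetical Q-values for a 3-action environment
q_values = torch.tensor([0.2, 0.8, -0.1])
# Late in training (step=10_000, decay=1000), epsilon has decayed to ~0.05,
# so this almost always returns the greedy action, index 1
action = select_action(q_values, step=10_000, start=0.9, end=0.05, decay=1000)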
online_network = QNetwork(state_size, action_size)
target_network = QNetwork(state_size, action_size)
# Initialize the target network with the same weights as the online network
target_network.load_state_dict(online_network.state_dict())
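QNetwork is assumed to have been defined earlier; a minimal sketch (an assumed architecture, not necessarily the one used in the course) could be:

import torch.nn as nn

class QNetwork(nn.Module):
    def __init__(self, state_size, action_size):
        super().__init__()
        # A small MLP mapping a state to one Q-value per action
        self.layers = nn.Sequential(
            nn.Linear(state_size, 64),
            nn.ReLU(),
            nn.Linear(64, action_size),
        )

    def forward(self, state):
        return self.layers(state)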
def update_target_network(target_network, online_network, tau):
    # Soft update: move each target weight a fraction tau toward the online weight
    target_net_state_dict = target_network.state_dict()
    online_net_state_dict = online_network.state_dict()
    for key in online_net_state_dict:
        target_net_state_dict[key] = (
            online_net_state_dict[key] * tau
            + target_net_state_dict[key] * (1 - tau))
    target_network.load_state_dict(target_net_state_dict)
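To make the role of tau concrete, here is a tiny numeric check (the one-parameter networks are hypothetical, purely for illustration):

# Hypothetical one-parameter networks, just to watch the blend happen
online = torch.nn.Linear(1, 1, bias=False)
target = torch.nn.Linear(1, 1, bias=False)
with torch.no_grad():
    online.weight.fill_(1.0)
    target.weight.fill_(0.0)
update_target_network(target, online, tau=0.05)
print(target.weight.item())  # 0.05: the target moved 5% of the way to the online weight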
# In the inner loop, after action selection
if len(replay_buffer) >= batch_size:
    states, actions, rewards, next_states, dones = (
        replay_buffer.sample(batch_size))
    # Q-values of the actions actually taken
    q_values = (online_network(states)
                .gather(1, actions).squeeze(1))
    # Compute targets without tracking gradients
    with torch.no_grad():
        next_q_values = target_network(next_states).amax(1)
        target_q_values = (
            rewards + gamma * next_q_values * (1 - dones))
    loss = torch.nn.MSELoss()(q_values, target_q_values)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    update_target_network(target_network, online_network, tau)
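The code above assumes replay_buffer returns batched tensors in the right shapes; a minimal buffer along those lines might look like this (a sketch, not the course's exact implementation):

import random
from collections import deque
import torch

class ReplayBuffer:
    def __init__(self, capacity):
        self.memory = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.memory, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        # actions get shape (batch_size, 1) so .gather(1, actions) works above
        return (torch.stack(states),
                torch.tensor(actions).unsqueeze(1),
                torch.tensor(rewards, dtype=torch.float32),
                torch.stack(next_states),
                torch.tensor(dones, dtype=torch.float32))

    def __len__(self):
        return len(self.memory)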
online_network to select actions and learn from experience
target_network to provide stable target Q-values
torch.no_grad() to disable gradient tracking for target Q-values
update_target_network() to slowly update target_network