diff --git a/examples/DQN/atari_agent.py b/examples/DQN/atari_agent.py
index 4af4478048bc582f5951446920a1686ed497b3b4..8a33ac4369f4d9f0c55d12c82b6fded63eedbc77 100644
--- a/examples/DQN/atari_agent.py
+++ b/examples/DQN/atari_agent.py
@@ -106,7 +106,7 @@ class AtariAgent(parl.Agent):
             'reward': reward,
             'next_obs': next_obs.astype('float32'),
             'terminal': terminal,
-            'lr': lr
+            'lr': np.float32(lr)
         }
         cost = self.fluid_executor.run(
             self.learn_program, feed=feed, fetch_list=[self.cost])[0]
diff --git a/examples/IMPALA/train.py b/examples/IMPALA/train.py
index 8440ee78cec30f5de568ea277769fe1df938ed9f..bfb4ad5c0343a92a6179a44a9d5b6e1ecb9bc79b 100755
--- a/examples/IMPALA/train.py
+++ b/examples/IMPALA/train.py
@@ -121,7 +121,9 @@ class Learner(object):
 
             yield [
                 obs_np, actions_np, behaviour_logits_np, rewards_np,
-                dones_np, self.lr, self.entropy_coeff
+                dones_np,
+                np.float32(self.lr),
+                np.float32(self.entropy_coeff)
             ]
 
     def run_learn(self):
diff --git a/parl/algorithms/fluid/impala/impala.py b/parl/algorithms/fluid/impala/impala.py
index 025f96f2650e3351552d6525c910d2f29406dbaa..bdfe53a67223165c13454260b59fc8e0a650b0e0 100644
--- a/parl/algorithms/fluid/impala/impala.py
+++ b/parl/algorithms/fluid/impala/impala.py
@@ -78,6 +78,7 @@ class VTraceLoss(object):
         self.entropy = layers.reduce_sum(policy_entropy)
 
         # The summed weighted loss
+        entropy_coeff = layers.reshape(entropy_coeff, shape=[1])
        self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff +
                            self.entropy * entropy_coeff)
 
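
For context, here is a minimal sketch of why the explicit `np.float32` casts in this diff help. The feed names `'lr'` and `'entropy_coeff'` are taken from the diff; the dtype reasoning is an assumption that the corresponding data layers are declared as `float32`, as is typical in these fluid-based examples. A plain Python float is `float64` by default, so feeding it directly can mismatch a `float32` layer.

```python
import numpy as np

# Hypothetical scalar hyperparameters, as fed by the examples in this diff.
lr = 3e-4
entropy_coeff = -0.01

# A Python float defaults to float64; an explicit cast yields float32.
print(np.asarray(lr).dtype)              # float64 -- would not match a float32 layer
print(np.asarray(np.float32(lr)).dtype)  # float32 -- matches

# Casting before building the feed dict, mirroring the diff.
feed = {
    'lr': np.float32(lr),
    'entropy_coeff': np.float32(entropy_coeff),
}

# The VTraceLoss change follows the same idea on the graph side: reshaping the
# fed entropy_coeff to shape [1] gives it an explicit rank so it can be
# multiplied with the reduced entropy term when forming the total loss.
```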