diff --git a/examples/A2C/atari_agent.py b/examples/A2C/atari_agent.py
index 5604f71016538650b0ed0355dd6cd2856f52c60e..94d2125214a9ef52273763cea6cc0213cc34c963 100755
--- a/examples/A2C/atari_agent.py
+++ b/examples/A2C/atari_agent.py
@@ -71,7 +71,10 @@ class AtariAgent(parl.Agent):
             lr = layers.data(
                 name='lr', shape=[1], dtype='float32', append_batch_size=False)
             entropy_coeff = layers.data(
-                name='entropy_coeff', shape=[], dtype='float32')
+                name='entropy_coeff',
+                shape=[1],
+                dtype='float32',
+                append_batch_size=False)

             total_loss, pi_loss, vf_loss, entropy = self.alg.learn(
                 obs, actions, advantages, target_values, lr, entropy_coeff)
diff --git a/examples/IMPALA/atari_agent.py b/examples/IMPALA/atari_agent.py
index 98d4a4c4fd3ea611f60f2d8da850265025541b4b..0746f951f6920a70b0af87430af51879b635ada7 100755
--- a/examples/IMPALA/atari_agent.py
+++ b/examples/IMPALA/atari_agent.py
@@ -58,7 +58,10 @@ class AtariAgent(parl.Agent):
             lr = layers.data(
                 name='lr', shape=[1], dtype='float32', append_batch_size=False)
             entropy_coeff = layers.data(
-                name='entropy_coeff', shape=[], dtype='float32')
+                name='entropy_coeff',
+                shape=[1],
+                dtype='float32',
+                append_batch_size=False)

             self.learn_reader = fluid.layers.create_py_reader_by_data(
                 capacity=32,
diff --git a/examples/IMPALA/train.py b/examples/IMPALA/train.py
index bfb4ad5c0343a92a6179a44a9d5b6e1ecb9bc79b..cf9e55c54d1df8a14fc1751ac75303c6adab42ad 100755
--- a/examples/IMPALA/train.py
+++ b/examples/IMPALA/train.py
@@ -123,7 +123,7 @@ class Learner(object):
                 obs_np, actions_np, behaviour_logits_np, rewards_np, dones_np,
                 np.float32(self.lr),
-                np.float32(self.entropy_coeff)
+                np.array([self.entropy_coeff], dtype='float32')
             ]

     def run_learn(self):
diff --git a/examples/LiftSim_baseline/A2C/lift_agent.py b/examples/LiftSim_baseline/A2C/lift_agent.py
index 9c2d64d64c0f89dc939a2458d1b2e9fedbb5242a..1dd35e59a3c840b6468f5d95bcc6fccc07445bc8 100644
--- a/examples/LiftSim_baseline/A2C/lift_agent.py
+++ b/examples/LiftSim_baseline/A2C/lift_agent.py
@@ -67,7 +67,10 @@ class LiftAgent(parl.Agent):
             lr = layers.data(
                 name='lr', shape=[1], dtype='float32', append_batch_size=False)
             entropy_coeff = layers.data(
-                name='entropy_coeff', shape=[], dtype='float32')
+                name='entropy_coeff',
+                shape=[1],
+                dtype='float32',
+                append_batch_size=False)

             total_loss, pi_loss, vf_loss, entropy = self.alg.learn(
                 obs, actions, advantages, target_values, lr, entropy_coeff)
diff --git a/parl/algorithms/fluid/a3c.py b/parl/algorithms/fluid/a3c.py
index 27aa1dc8785315e4347cebcc3b13f8d80659e0a9..9b9f57e8eb5bfd59e3f79c1fc42e4d1374618f23 100644
--- a/parl/algorithms/fluid/a3c.py
+++ b/parl/algorithms/fluid/a3c.py
@@ -72,7 +72,6 @@
         policy_entropy = policy_distribution.entropy()
         entropy = layers.reduce_sum(policy_entropy)

-        entropy_coeff = layers.reshape(entropy_coeff, shape=[1])
         total_loss = (
             pi_loss + vf_loss * self.vf_loss_coeff + entropy * entropy_coeff)
diff --git a/parl/algorithms/fluid/impala/impala.py b/parl/algorithms/fluid/impala/impala.py
index bdfe53a67223165c13454260b59fc8e0a650b0e0..025f96f2650e3351552d6525c910d2f29406dbaa 100644
--- a/parl/algorithms/fluid/impala/impala.py
+++ b/parl/algorithms/fluid/impala/impala.py
@@ -78,7 +78,6 @@
         self.entropy = layers.reduce_sum(policy_entropy)

         # The summed weighted loss
-        entropy_coeff = layers.reshape(entropy_coeff, shape=[1])
         self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff +
                            self.entropy * entropy_coeff)
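
A minimal sketch of the pattern this patch adopts, assuming the legacy PaddlePaddle 1.x fluid API these examples target: entropy_coeff is declared as a shape=[1] data layer with append_batch_size=False (instead of shape=[] plus a later layers.reshape inside the algorithm), and the fed value is a one-element float32 array rather than a bare np.float32 scalar. The program and variable names (entropy, weighted_entropy) are illustrative and not taken from the repository.

# Sketch only; assumes PaddlePaddle 1.x (paddle.fluid) is installed.
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import layers

main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
    # A placeholder standing in for the summed policy entropy.
    entropy = layers.data(
        name='entropy', shape=[1], dtype='float32', append_batch_size=False)
    # Declared exactly as in the patched agents: a [1]-shaped tensor,
    # no batch dimension appended.
    entropy_coeff = layers.data(
        name='entropy_coeff',
        shape=[1],
        dtype='float32',
        append_batch_size=False)
    # No layers.reshape(entropy_coeff, shape=[1]) is needed any more:
    # the placeholder already has the [1] shape the loss expects.
    weighted_entropy = entropy * entropy_coeff

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)

out, = exe.run(
    main_program,
    feed={
        'entropy': np.array([2.0], dtype='float32'),
        # Feed a one-element array, mirroring
        # np.array([self.entropy_coeff], dtype='float32') in the IMPALA learner.
        'entropy_coeff': np.array([0.01], dtype='float32'),
    },
    fetch_list=[weighted_entropy])
print(out)  # prints roughly [0.02]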