diff --git a/fluid/DeepQNetwork/DuelingDQN_agent.py b/fluid/DeepQNetwork/DuelingDQN_agent.py
index d6224ef34a2cb1ec0a09d9ed2e87a2f89ab82142..271a767b7b5841cf1abe213fc477859e3cf5dd05 100644
--- a/fluid/DeepQNetwork/DuelingDQN_agent.py
+++ b/fluid/DeepQNetwork/DuelingDQN_agent.py
@@ -158,7 +158,8 @@ class DuelingDQNModel(object):
         for i, var in enumerate(policy_vars):
             sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])
             sync_ops.append(sync_op)
-        sync_program = sync_program.prune(sync_ops)
+        # The prune API is deprecated, please don't use it any more.
+        sync_program = sync_program._prune(sync_ops)
         return sync_program
 
     def act(self, state, train_or_test):
diff --git a/fluid/DeepQNetwork/atari.py b/fluid/DeepQNetwork/atari.py
index 46b7542019121b36e3e8923dba350e1d8a71fa34..ec793cba15ddc1c42986689eaad5773875a4ffde 100644
--- a/fluid/DeepQNetwork/atari.py
+++ b/fluid/DeepQNetwork/atari.py
@@ -9,7 +9,7 @@ import gym
 from gym import spaces
 from gym.envs.atari.atari_env import ACTION_MEANING
 
-from ale_python_interface import ALEInterface
+from atari_py import ALEInterface
 
 __all__ = ['AtariPlayer']
 
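
Note (not part of the patch): the first hunk replaces the removed public Program.prune with the private Program._prune in the program that copies the policy network's parameters into the target network; since _prune is a private method, it may change again across Paddle fluid releases. The second hunk switches the ALE binding from the standalone ale_python_interface package to atari_py, the binding gym itself depends on. The sketch below is a minimal smoke test of the new import path, assuming atari_py is installed with its bundled ROMs; ALEInterface, get_game_path, and the 'pong' ROM name all come from atari_py, and the 1000-step bound is an arbitrary choice for illustration:

import atari_py
from atari_py import ALEInterface

ale = ALEInterface()
ale.setInt(b'random_seed', 0)                # atari_py expects bytes option keys
ale.loadROM(atari_py.get_game_path('pong'))  # resolve the bundled 'pong' ROM

actions = ale.getMinimalActionSet()          # legal actions for this game
total_reward = 0
for _ in range(1000):                        # bounded smoke test, not a real rollout
    if ale.game_over():
        ale.reset_game()
    total_reward += ale.act(int(actions[0])) # repeat the first legal action
print('accumulated reward:', total_reward)

If this runs and prints a reward, the atari_py replacement for ale_python_interface is wired up correctly for AtariPlayer.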