#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gym
import numpy as np
import os
import queue
import six
import time
import threading
import parl
from atari_model import AtariModel
from atari_agent import AtariAgent
from collections import defaultdict

from parl.env.atari_wrappers import wrap_deepmind
from parl.utils import logger, get_gpu_count, summary
from parl.utils.scheduler import PiecewiseScheduler
from parl.utils.time_stat import TimeStat
from parl.utils.window_stat import WindowStat
from parl.utils import machine_info

from actor import Actor


class Learner(object):
    def __init__(self, config):
        self.config = config

        #=========== Create Agent ==========
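        # A local env instance is created only to query the observation shape
        # and action dimension; the actual rollouts happen in the remote actors.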
        env = gym.make(config['env_name'])
        env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
        obs_shape = env.observation_space.shape
        act_dim = env.action_space.n
        self.config['obs_shape'] = obs_shape
        self.config['act_dim'] = act_dim

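        # PARL's model -> algorithm -> agent stack: AtariModel defines the
        # network, the A3C algorithm defines the loss, and AtariAgent exposes
        # the learn() interface used in step().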
        model = AtariModel(act_dim)
        algorithm = parl.algorithms.A3C(
            model, vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = AtariAgent(algorithm, config)

        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, (
                'Only support training on a single GPU. Please set the environment '
                'variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]`.')

        #========== Learner ==========

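        # Moving-window statistics over the most recent 100 learner updates.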
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None

        self.learn_time_stat = TimeStat(100)
        self.start_time = None

        #========== Remote Actor ===========
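        # Queues shared with the per-actor sampling threads: params_queues
        # push the latest weights out, sample_data_queue brings batches back,
        # and remote_metrics_queue collects episode statistics.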
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()

        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0

        self.params_queues = []
        self.create_actors()

    def create_actors(self):
        """ Connect to the cluster and start sampling of the remote actor.
H
Hongsheng Zeng 已提交
82
        """
        parl.connect(self.config['master_address'])

        logger.info('Waiting for {} remote actors to connect.'.format(
            self.config['actor_num']))

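        # One parameter queue and one daemon sampling thread per remote actor.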
        for i in six.moves.range(self.config['actor_num']):
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)

            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))

            remote_thread = threading.Thread(
                target=self.run_remote_sample, args=(params_queue, ))
            remote_thread.daemon = True
            remote_thread.start()

        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    def run_remote_sample(self, params_queue):
        """ Sample data from remote actor and update parameters of remote actor.
        """
        remote_actor = Actor(self.config)

        cnt = 0
        while True:
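            # Block until the learner publishes fresh parameters, then sync
            # the remote actor and pull one sampled batch.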
            latest_params = params_queue.get()
            remote_actor.set_weights(latest_params)
            batch = remote_actor.sample()

            self.sample_data_queue.put(batch)

            cnt += 1
            if cnt % self.config['get_remote_metrics_interval'] == 0:
                metrics = remote_actor.get_metrics()
                if metrics:
                    self.remote_metrics_queue.put(metrics)

    def step(self):
        """
        1. kick off all actors to synchronize parameters and sample data;
        2. collect the sampled data from all actors;
        3. update parameters.
        """

        latest_params = self.agent.get_weights()
        for params_queue in self.params_queues:
            params_queue.put(latest_params)

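        # Wait for one batch from every actor and merge them into a single
        # training batch.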
        train_batch = defaultdict(list)
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)

            self.sample_total_steps += sample_data['obs'].shape[0]

        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)

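        # One synchronous A2C update on the merged batch; the update is timed
        # so learn_time_s can be reported in log_metrics.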
        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs_np=train_batch['obs'],
                actions_np=train_batch['actions'],
                advantages_np=train_batch['advantages'],
                target_values_np=train_batch['target_values'])

        self.total_loss_stat.add(total_loss)
        self.pi_loss_stat.add(pi_loss)
        self.vf_loss_stat.add(vf_loss)
        self.entropy_stat.add(entropy)
        self.lr = lr
        self.entropy_coeff = entropy_coeff

    def log_metrics(self):
        """ Log metrics of learner and actors
        """
        if self.start_time is None:
            return

        metrics = []
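        # Drain everything the actor threads have reported since the last log.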
        while True:
            try:
                metric = self.remote_metrics_queue.get_nowait()
                metrics.append(metric)
            except queue.Empty:
                break

        episode_rewards, episode_steps = [], []
        for x in metrics:
            episode_rewards.extend(x['episode_rewards'])
            episode_steps.extend(x['episode_steps'])
        max_episode_rewards, mean_episode_rewards, min_episode_rewards = None, None, None
        max_episode_steps, mean_episode_steps, min_episode_steps = None, None, None
        if episode_rewards:
            mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
            max_episode_rewards = np.max(np.array(episode_rewards).flatten())
            min_episode_rewards = np.min(np.array(episode_rewards).flatten())

            mean_episode_steps = np.mean(np.array(episode_steps).flatten())
            max_episode_steps = np.max(np.array(episode_steps).flatten())
            min_episode_steps = np.min(np.array(episode_steps).flatten())

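        # Episode statistics stay None if no episode finished during this
        # interval; such entries are skipped when writing to the summary.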
        metric = {
            'sample_steps': self.sample_total_steps,
            'max_episode_rewards': max_episode_rewards,
            'mean_episode_rewards': mean_episode_rewards,
            'min_episode_rewards': min_episode_rewards,
            'max_episode_steps': max_episode_steps,
            'mean_episode_steps': mean_episode_steps,
            'min_episode_steps': min_episode_steps,
            'total_loss': self.total_loss_stat.mean,
            'pi_loss': self.pi_loss_stat.mean,
            'vf_loss': self.vf_loss_stat.mean,
            'entropy': self.entropy_stat.mean,
            'learn_time_s': self.learn_time_stat.mean,
            'elapsed_time_s': int(time.time() - self.start_time),
            'lr': self.lr,
            'entropy_coeff': self.entropy_coeff,
        }

        for key, value in metric.items():
            if value is not None:
                summary.add_scalar(key, value, self.sample_total_steps)

        logger.info(metric)

    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']


if __name__ == '__main__':
    from a2c_config import config

    learner = Learner(config)
    assert config['log_metrics_interval_s'] > 0

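    # Run learner.step() repeatedly until the sample budget (max_sample_steps)
    # is reached, logging metrics roughly every log_metrics_interval_s seconds.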
    while not learner.should_stop():
        start = time.time()
        while time.time() - start < config['log_metrics_interval_s']:
            learner.step()
        learner.log_metrics()