# -*- coding: utf-8 -*-

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from utils import fluid_flatten


class DuelingDQNModel(object):
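    """Dueling DQN agent: a policy network plus a periodically refreshed
    target network, implemented with the PaddlePaddle fluid static-graph API."""
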
    def __init__(self, state_dim, action_dim, gamma, hist_len, use_cuda=False):
        self.img_height = state_dim[0]
        self.img_width = state_dim[1]
        self.action_dim = action_dim
        self.gamma = gamma
        self.exploration = 1.1
        self.update_target_steps = 10000 // 4
        self.hist_len = hist_len
        self.use_cuda = use_cuda

        self.global_step = 0
        self._build_net()

    def _get_inputs(self):
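        # Data layers (feed targets) for one batch: stacked input frames for
        # the current state, the chosen action, the reward, the next state,
        # and the episode-termination flag.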
        return fluid.layers.data(
                   name='state',
                   shape=[self.hist_len, self.img_height, self.img_width],
                   dtype='float32'), \
               fluid.layers.data(
                   name='action', shape=[1], dtype='int32'), \
               fluid.layers.data(
                   name='reward', shape=[], dtype='float32'), \
               fluid.layers.data(
                   name='next_s',
                   shape=[self.hist_len, self.img_height, self.img_width],
                   dtype='float32'), \
               fluid.layers.data(
                   name='isOver', shape=[], dtype='bool')

    def _build_net(self):
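        # Three separate fluid Programs: one for inference, one for training,
        # and one for syncing the target network with the policy network.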
        self.predict_program = fluid.Program()
        self.train_program = fluid.Program()
        self._sync_program = fluid.Program()

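        # Inference graph: a single forward pass of the policy network.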
        with fluid.program_guard(self.predict_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            self.pred_value = self.get_DQN_prediction(state)

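        # Training graph: forward pass plus TD target and loss.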
        with fluid.program_guard(self.train_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            pred_value = self.get_DQN_prediction(state)

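            # Clip rewards to [-1, 1] before building the TD target.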
            reward = fluid.layers.clip(reward, min=-1.0, max=1.0)

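            # One-hot mask over actions, used to select Q(s, a) below.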
            action_onehot = fluid.layers.one_hot(action, self.action_dim)
            action_onehot = fluid.layers.cast(action_onehot, dtype='float32')

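            # Q-value of the action that was actually taken.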
            pred_action_value = fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(action_onehot, pred_value), dim=1)

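            # Bootstrap value from the target network; gradients are blocked so
            # only the policy network is trained.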
            targetQ_predict_value = self.get_DQN_prediction(next_s, target=True)
            best_v = fluid.layers.reduce_max(targetQ_predict_value, dim=1)
            best_v.stop_gradient = True

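            # TD target (the bootstrap term is dropped on terminal transitions)
            # and the mean squared TD error.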
            target = reward + (1.0 - fluid.layers.cast(
                isOver, dtype='float32')) * self.gamma * best_v
            cost = fluid.layers.square_error_cost(pred_action_value, target)
            cost = fluid.layers.reduce_mean(cost)

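            # Adam with learning rate 5e-4.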
            optimizer = fluid.optimizer.Adam(1e-3 * 0.5, epsilon=1e-3)
            optimizer.minimize(cost)

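        # Collect the parameters of both networks (excluding gradient vars) and
        # pair them up by name for the sync program below.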
        vars = list(self.train_program.list_vars())
        policy_vars = list(filter(
            lambda x: 'GRAD' not in x.name and 'policy' in x.name, vars))
        target_vars = list(filter(
            lambda x: 'GRAD' not in x.name and 'target' in x.name, vars))
        policy_vars.sort(key=lambda x: x.name)
        target_vars.sort(key=lambda x: x.name)
        
        with fluid.program_guard(self._sync_program):
            sync_ops = []
            for policy_var, target_var in zip(policy_vars, target_vars):
                # Copy each policy parameter into its matching target parameter.
                sync_op = fluid.layers.assign(policy_var, target_var)
                sync_ops.append(sync_op)

        # fluid exe
        place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
        self.exe = fluid.Executor(place)
        self.exe.run(fluid.default_startup_program())

    def get_DQN_prediction(self, image, target=False):
        image = image / 255.0

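        # Parameter names are prefixed with 'policy' or 'target' so the two
        # networks keep separate weights inside the same program.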
        variable_field = 'target' if target else 'policy'

        conv1 = fluid.layers.conv2d(
            input=image,
            num_filters=32,
            filter_size=[5, 5],
            stride=[1, 1],
            padding=[2, 2],
            act='relu',
            param_attr=ParamAttr(name='{}_conv1'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv1_b'.format(variable_field)))
        max_pool1 = fluid.layers.pool2d(
            input=conv1, pool_size=[2, 2], pool_stride=[2, 2], pool_type='max')

        conv2 = fluid.layers.conv2d(
            input=max_pool1,
            num_filters=32,
            filter_size=[5, 5],
            stride=[1, 1],
            padding=[2, 2],
            act='relu',
            param_attr=ParamAttr(name='{}_conv2'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv2_b'.format(variable_field)))
        max_pool2 = fluid.layers.pool2d(
            input=conv2, pool_size=[2, 2], pool_stride=[2, 2], pool_type='max')

        conv3 = fluid.layers.conv2d(
            input=max_pool2,
            num_filters=64,
            filter_size=[4, 4],
            stride=[1, 1],
            padding=[1, 1],
            act='relu',
            param_attr=ParamAttr(name='{}_conv3'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv3_b'.format(variable_field)))
        max_pool3 = fluid.layers.pool2d(
            input=conv3, pool_size=[2, 2], pool_stride=[2, 2], pool_type='max')

        conv4 = fluid.layers.conv2d(
            input=max_pool3,
            num_filters=64,
            filter_size=[3, 3],
            stride=[1, 1],
            padding=[1, 1],
            act='relu',
            param_attr=ParamAttr(name='{}_conv4'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv4_b'.format(variable_field)))

        flatten = fluid_flatten(conv4)

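        # Dueling heads: a scalar state value V(s) and per-action advantages A(s, a).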
        value = fluid.layers.fc(
            input=flatten,
            size=1,
            param_attr=ParamAttr(name='{}_value_fc'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_value_fc_b'.format(variable_field)))

        advantage = fluid.layers.fc(
            input=flatten,
            size=self.action_dim,
            param_attr=ParamAttr(name='{}_advantage_fc'.format(variable_field)),
            bias_attr=ParamAttr(
                name='{}_advantage_fc_b'.format(variable_field)))

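        # Combine the streams: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a).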
        Q = advantage + (value - fluid.layers.reduce_mean(
            advantage, dim=1, keep_dim=True))
        return Q


    def act(self, state, train_or_test):
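        # Epsilon-greedy action selection: random action with probability
        # self.exploration during training, otherwise greedy on the predicted
        # Q-values (with a residual 1% random-action rate).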
        sample = np.random.random()
        if train_or_test == 'train' and sample < self.exploration:
            act = np.random.randint(self.action_dim)
        else:
            if np.random.random() < 0.01:
                act = np.random.randint(self.action_dim)
            else:
                state = np.expand_dims(state, axis=0)
                pred_Q = self.exe.run(self.predict_program,
                                      feed={'state': state.astype('float32')},
                                      fetch_list=[self.pred_value])[0]
                pred_Q = np.squeeze(pred_Q, axis=0)
                act = np.argmax(pred_Q)
        if train_or_test == 'train':
            self.exploration = max(0.1, self.exploration - 1e-6)
        return act

    def train(self, state, action, reward, next_state, isOver):
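        # Periodically copy the policy weights into the target network, then
        # run one optimization step on the sampled batch.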
        if self.global_step % self.update_target_steps == 0:
            self.sync_target_network()
        self.global_step += 1

        action = np.expand_dims(action, -1)
        self.exe.run(self.train_program,
                     feed={
                         'state': state.astype('float32'),
                         'action': action.astype('int32'),
                         'reward': reward,
                         'next_s': next_state.astype('float32'),
                         'isOver': isOver
                     })

    def sync_target_network(self):
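        # Run the assign ops that overwrite the target-network parameters with
        # the current policy-network parameters.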
        self.exe.run(self._sync_program)