#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
warnings.simplefilter('default')

import copy
import numpy as np
import paddle.fluid as fluid
from parl.core.fluid.algorithm import Algorithm
from parl.core.fluid import layers

__all__ = ['DDQN']

class DDQN(Algorithm):
    """Double DQN algorithm (van Hasselt et al., 2016) on paddle.fluid.

    The greedy next action is chosen with the behavior network
    ``self.model`` but evaluated with ``self.target_model``, which reduces
    the Q-value overestimation bias of vanilla DQN.
    """

    def __init__(self, model, act_dim=None, gamma=None, lr=None):
        """ Double DQN algorithm

        Args:
            model (parl.Model): model defining forward network of Q function
            act_dim (int): dimension of the action space
            gamma (float): discounted factor for reward computation.
            lr (float): learning rate. May be left as None and supplied
                per-call through ``learn(..., learning_rate=...)`` instead.
        """
        # Validate hyper-parameters before the (potentially expensive)
        # deepcopy of the model, so bad config fails fast.
        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)

        self.model = model
        # Target network starts as an exact copy of the behavior network;
        # it is refreshed explicitly via sync_target().
        self.target_model = copy.deepcopy(model)

        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr

    def predict(self, obs):
        """ use value model self.model to predict the action value
        """
        return self.model.value(obs)

    def learn(self,
              obs,
              action,
              reward,
              next_obs,
              terminal,
              learning_rate=None):
        """ update value model self.model with DQN algorithm

        Args:
            obs: batch of observations.
            action: batch of taken actions (int64 indices).
            reward: batch of immediate rewards.
            next_obs: batch of successor observations.
            terminal: batch of episode-termination flags.
            learning_rate (float): optional per-call learning rate; falls
                back to the ``lr`` given at construction time.

        Returns:
            cost: scalar mean squared TD-error used for the update.
        """
        # Support the modification of learning_rate
        if learning_rate is None:
            assert isinstance(
                self.lr,
                float), "Please set the learning rate of DQN in initialization."
            learning_rate = self.lr

        # Q(s, a) for the actions actually taken, via one-hot masking.
        pred_value = self.model.value(obs)
        action_onehot = layers.one_hot(action, self.act_dim)
        action_onehot = layers.cast(action_onehot, dtype='float32')
        pred_action_value = layers.reduce_sum(
            layers.elementwise_mul(action_onehot, pred_value), dim=1)

        # choose acc. to behavior network
        next_action_value = self.model.value(next_obs)
        greedy_action = layers.argmax(next_action_value, axis=-1)

        # calculate the target q value with target network
        batch_size = layers.cast(layers.shape(greedy_action)[0], dtype='int64')
        # Build flattened indices row*act_dim + greedy_action so that the
        # chosen actions can be gathered from the flattened Q table.
        range_tmp = layers.range(
            start=0, end=batch_size, step=1, dtype='int64') * self.act_dim
        a_indices = range_tmp + greedy_action
        a_indices = layers.cast(a_indices, dtype='int32')
        next_pred_value = self.target_model.value(next_obs)
        next_pred_value = layers.reshape(
            next_pred_value, shape=[
                -1,
            ])
        max_v = layers.gather(next_pred_value, a_indices)
        max_v = layers.reshape(
            max_v, shape=[
                -1,
            ])
        # The bootstrap target is a constant: no gradient flows into the
        # target network.
        max_v.stop_gradient = True

        # Terminal transitions drop the bootstrap term (1 - terminal == 0).
        target = reward + (
            1.0 - layers.cast(terminal, dtype='float32')) * self.gamma * max_v
        cost = layers.square_error_cost(pred_action_value, target)
        cost = layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(
            learning_rate=learning_rate, epsilon=1e-3)
        optimizer.minimize(cost)
        return cost

    def sync_target(self):
        """ sync weights of self.model to self.target_model
        """
        self.model.sync_weights_to(self.target_model)