#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import parl
import torch
import numpy as np


class CartpoleAgent(parl.Agent):
    """Agent of Cartpole env.

    Args:
        algorithm(parl.Algorithm): algorithm used to solve the problem.

    """

    def __init__(self, algorithm):
        super(CartpoleAgent, self).__init__(algorithm)
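        # run on GPU when one is available, otherwise fall back to CPU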
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

    def sample(self, obs):
        """Sample an action when given an observation

        Args:
            obs(np.float32): shape of (obs_dim,)
        
        Returns:
            action(int)
        """
        obs = torch.tensor(obs, device=self.device, dtype=torch.float)
        prob = self.alg.predict(obs).cpu()
        prob = prob.detach().numpy()
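        # sample an action according to the predicted probability distribution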
        action = np.random.choice(len(prob), 1, p=prob)[0]
        return action

    def predict(self, obs):
        """Predict an action when given an observation

        Args:
            obs(np.float32): shape of (obs_dim,)
        
        Returns:
            action(int)
        """
        obs = torch.tensor(obs, device=self.device, dtype=torch.float)
        prob = self.alg.predict(obs)
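        # act greedily: choose the action with the highest probability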
        _, action = prob.max(-1)
        return action.item()

    def learn(self, obs, action, reward):
        """Update model with an episode data

        Args:
            obs(np.float32): shape of (batch_size, obs_dim)
            action(np.int64): shape of (batch_size,)
            reward(np.float32): shape of (batch_size,)
        
        Returns:
            loss(float)

        """
        obs = torch.tensor(obs, device=self.device, dtype=torch.float)
        action = torch.tensor(action, device=self.device, dtype=torch.long)
        reward = torch.tensor(reward, device=self.device, dtype=torch.float)

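        # delegate the gradient update to the underlying algorithm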
        loss = self.alg.learn(obs, action, reward)
        return loss.item()
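
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file). It
# assumes PARL's PolicyGradient algorithm and the classic gym API, and the
# small CartpoleModel policy network below is a hypothetical stand-in for
# whatever model the surrounding project defines.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import gym
    import torch.nn as nn
    import torch.nn.functional as F
    from parl.algorithms import PolicyGradient

    class CartpoleModel(parl.Model):
        """Minimal two-layer policy network emitting action probabilities."""

        def __init__(self, obs_dim, act_dim):
            super(CartpoleModel, self).__init__()
            self.fc1 = nn.Linear(obs_dim, 64)
            self.fc2 = nn.Linear(64, act_dim)

        def forward(self, obs):
            h = torch.tanh(self.fc1(obs))
            return F.softmax(self.fc2(h), dim=-1)

    env = gym.make("CartPole-v0")
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.n
    model = CartpoleModel(obs_dim, act_dim)
    agent = CartpoleAgent(PolicyGradient(model, lr=1e-3))

    obs = env.reset()  # classic gym API: reset() returns only the observation
    action = agent.sample(np.array(obs, dtype=np.float32))
    print("sampled action:", action)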