import math
from pathlib import Path
import copy
from tqdm import tqdm
from functools import partial
from collections import deque, namedtuple
from random import randrange

from beartype import beartype
from beartype.typing import List, Optional, Callable, Deque

from einops import rearrange, repeat
from einops.layers.torch import Rearrange

import torch
from torch import nn
import torch.nn.functional as F

from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.strategies import DeepSpeedStrategy
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam

from src.model import RWKV
from src.rlhf.reward import RewardModel
from src.rlhf.optimizer import get_optimizer
from src.rlhf.utils import masked_mean, eval_decorator

# actor critic

PPOActionCriticReturn = namedtuple('PPOActionCriticReturn', [
    'actions',
    'sequence',
    'mask',
    'prompt_mask',
    'action_logits',
    'values'
])
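# Fields of one rollout: `actions` are the sampled response tokens, `sequence` is
# prompt + response, `mask` marks valid (pre-eos) tokens, `prompt_mask` marks the
# prompt positions, and `action_logits` / `values` come from the actor and critic.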

@beartype
class ActorCritic(pl.LightningModule):
    def __init__(
        self,
        rwkv: RWKV,
        args,
        critic: Optional[RWKV] = None,
        pooled_values = False
    ):
        super().__init__()
        self.args = args
        self.actor = rwkv

        self.critic = critic

        if not exists(self.critic):
            self.critic = copy.deepcopy(rwkv)

        self.pooled_values = pooled_values
        self.value_head = nn.Sequential(
            nn.Linear(args.n_embd, 1),
            Rearrange('... 1 -> ...')
        )

        nn.init.zeros_(self.value_head[0].bias)
        nn.init.orthogonal_(self.value_head[0].weight, gain = math.sqrt(2))
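        # explicit value-head init: zero bias and an orthogonal weight matrix
        # (gain sqrt(2)), a common choice in PPO-style implementations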

    def configure_optimizers(self):
        args = self.args
        if args.layerwise_lr > 0:
            lr_1x = set()
            lr_2x = set()
            lr_3x = set()
            for n, p in self.named_parameters():
                if "time_mix" in n:
                    if args.my_pile_stage == 2:
                        lr_2x.add(n)
                    else:
                        lr_1x.add(n)
                elif "time_decay" in n:
                    if args.my_pile_stage == 2:
                        lr_3x.add(n)
                    else:
                        lr_2x.add(n)
                elif "time_first" in n:
                    lr_3x.add(n)
                else:
                    lr_1x.add(n)
            lr_1x = sorted(list(lr_1x))
            lr_2x = sorted(list(lr_2x))
            lr_3x = sorted(list(lr_3x))
            param_dict = {n: p for n, p in self.named_parameters()}
            if args.my_pile_stage == 2:
                optim_groups = [
                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init},
                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init},
                ]
            else:
                optim_groups = [
                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0},
                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0},
                ]
        else:
            optim_groups = [
                {"params": [p for n, p in self.named_parameters()], "weight_decay": 0.0},
            ]

        if self.deepspeed_offload:
            return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False)
        return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False)
        # return ZeroOneAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, weight_decay=0, amsgrad=False, cuda_aware=False)

    @property
    def deepspeed_offload(self) -> bool:
        strategy = self.trainer.strategy
        if isinstance(strategy, DeepSpeedStrategy):
            cfg = strategy.config["zero_optimization"]
            return cfg.get("offload_optimizer") or cfg.get("offload_param")
        return False

    @torch.no_grad()
    @eval_decorator
    def generate(
        self,
        state,
        max_seq_len,
        eos_token = None,
        return_values = False,
        **kwargs
    ):
        # generate one response, which amounts to taking one action
        actions = self.actor.generate(
            max_seq_len,
            prompt = state,       
            eos_token = eos_token,     
            use_tqdm = True,
            **kwargs
        )

        # concatenate the prompt (state) and the response (actions)
        sequence = torch.cat((state, actions), dim = -1)
        action_len = actions.shape[-1]
        state_len = state.shape[-1]

        # build the prompt mask (state mask) and the response mask (action mask)
        prompt_mask = torch.arange(sequence.shape[-1], device = state.device) < state_len
        prompt_mask = repeat(prompt_mask, 'n -> b n', b = sequence.shape[0])

        action_mask = ~prompt_mask

        # account for the eos token
        mask = None
        if exists(eos_token):
            mask = ((sequence == eos_token).cumsum(dim = -1) == 0)
            mask = F.pad(mask, (1, -1), value = True) # include eos token
            action_mask &= mask
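            # the pad above keeps the eos token itself, but everything after the
            # first eos is dropped from the action mask, so trailing padding is
            # never treated as part of the action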

        # feed the generated sequence to the actor to get the action_logits
        # feed the generated sequence to the critic to get the value
        action_logits, value = self.forward(
            sequence,
            mask = action_mask,
            return_values = return_values
        )        

        return PPOActionCriticReturn(
            actions,
            sequence,
            mask,
            prompt_mask,
            action_logits,
            value
        )

    def forward(
        self,
        x,
        mask = None,
        return_values = True
    ):
        action_logits, _ = self.actor(
            x,
            ppo_train = True
        )

        if not return_values:
            return action_logits, None

        _, critic_embeds = self.critic(
            x,
            return_only_embedding = True,
            ppo_train = True
        )

        if self.pooled_values:
            critic_embeds = shift(critic_embeds, shift = 1, dim = -2)
            critic_embeds = masked_mean(critic_embeds, mask, dim = 1)

        values = self.value_head(critic_embeds)

        return action_logits, values

# data

Memory = namedtuple('Memory', [
    'sequence',
    'prompt_mask',
    'mask',
    'action_prob',
    'action_log_prob',
    'reward',
    'value'
])
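# one Memory holds a single rollout collected by RLHF.make_experience; batches of
# these fields are what ExperienceDataset serves back during the PPO update epochs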

@beartype
class ExperienceDataset(Dataset):
    def __init__(
        self,
        data: List[torch.Tensor],
        device = None
    ):
        super().__init__()
        self.data = data
        self.device = device

    def __len__(self):
        return self.data[0].shape[0]

    def __getitem__(self, ind):
        return tuple(map(lambda t: t[ind].to(self.device), self.data))

def create_dataloader(data, batch_size, shuffle = True, device = None, **kwargs):
    ds = ExperienceDataset(data, device = device)
    return DataLoader(ds, batch_size = batch_size, shuffle = shuffle, **kwargs)

# helper functions

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def masked_normalize(t, eps = 1e-5, mask = None, dim = None):
    dim = default(dim, tuple(range(t.ndim)))
    kwargs = dict(dim = dim, keepdim = True)

    mean = masked_mean(t, mask = mask, **kwargs)
    mean_centered = t - mean
    var = masked_mean(mean_centered ** 2, mask = mask, **kwargs)

    return mean_centered * var.clamp(min = eps).rsqrt()
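# normalizes to zero mean and unit variance, computing the statistics only over
# positions allowed by `mask`; used to whiten the advantages in training_step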

def pad_sequence_fixed(sequences, *args, **kwargs):
    first_el = sequences[0]
    has_no_dimension = first_el.ndim == 0

    # if no dimensions, add a single dimension
    if has_no_dimension:
        sequences = tuple(map(lambda t: t[None], sequences))

    out = pad_sequence(sequences, *args, **kwargs)

    if has_no_dimension:
        out = rearrange(out, '... 1 -> ...')

    return out

def log(t, eps = 1e-20):
    return torch.log(t.clamp(min = eps))

def log_prob(prob, indices):
    assert prob.shape[:2] == indices.shape, f'preceding shapes of prob {prob.shape[:2]} and indices {indices.shape} must match'
    return log(prob.gather(-1, indices[..., None])).squeeze(-1)
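# e.g. prob of shape (batch, seq, vocab) with indices of shape (batch, seq)
# yields the log-probability of each chosen token, shape (batch, seq)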

def shift(t, value = 0, shift = 1, dim = -1):
    zeros = (0, 0) * (-dim - 1)
    return F.pad(t, (*zeros, shift, -shift), value = value)
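# shifts a tensor along `dim` by `shift` positions, padding with `value`,
# e.g. shift(torch.tensor([1, 2, 3])) -> tensor([0, 1, 2]); used to align
# logits/values with the token positions they predict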

def masked_entropy(prob, dim = -1, mask = None):
    entropies = (prob * log(prob)).sum(dim = -1)
    return masked_mean(entropies, mask = mask).mean()

def masked_kl_div(prob1, prob2, mask = None):
    """
    need to account for variable sequence lengths, therefore not using the built-in functional version
    """
    kl_divs = (prob1 * (log(prob2) - log(prob1))).sum(dim = -1)

    if not exists(mask):
        return kl_divs.mean()

    return masked_mean(kl_divs, mask).mean()

def clipped_value_loss(values, rewards, old_values, clip):
    value_clipped = old_values + (values - old_values).clamp(-clip, clip)
    value_loss_1 = (value_clipped.flatten() - rewards) ** 2
    value_loss_2 = (values.flatten() - rewards) ** 2
    return torch.mean(torch.max(value_loss_1, value_loss_2))
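# PPO-style value clipping: the new value estimate is only trusted within `clip`
# of the old one, and the larger of the clipped and unclipped squared errors
# against the rewards is what gets minimized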

# rlhf

@beartype
class RLHF(pl.LightningModule):
    def __init__(
        self,
        args,
        rwkv: RWKV,
        reward_model: RewardModel
    ):
        super().__init__()

        self.args = args
        self.rwkv = rwkv

        # initialize the actor-critic from the RWKV model
        actor_critic = ActorCritic(
            rwkv = self.rwkv,
            args = self.args,
            pooled_values = args.critic_pooled_values
        ).to(self.rwkv.device)

        self.actor_critic = actor_critic

        # put the reward model in evaluation mode
        self.reward_model = reward_model.eval()

    def save(self, filepath = './checkpoint.pt'):
        torch.save(self.actor_critic.state_dict(), filepath)

    def load(self, filepath = './checkpoint.pt'):
        state_dict = torch.load(filepath)
        self.actor_critic.load_state_dict(state_dict)
    
    def configure_optimizers(self):
        args = self.args
        if args.layerwise_lr > 0:
            lr_1x = set()
            lr_2x = set()
            lr_3x = set()
            for n, p in self.named_parameters():
                if "time_mix" in n:
                    if args.my_pile_stage == 2:
                        lr_2x.add(n)
                    else:
                        lr_1x.add(n)
                elif "time_decay" in n:
                    if args.my_pile_stage == 2:
                        lr_3x.add(n)
                    else:
                        lr_2x.add(n)
                elif "time_first" in n:
                    lr_3x.add(n)
                else:
                    lr_1x.add(n)
            lr_1x = sorted(list(lr_1x))
            lr_2x = sorted(list(lr_2x))
            lr_3x = sorted(list(lr_3x))
            param_dict = {n: p for n, p in self.named_parameters()}
            if args.my_pile_stage == 2:
                optim_groups = [
                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init},
                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init},
                ]
            else:
                optim_groups = [
                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0},
                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0},
                ]
        else:
            optim_groups = [
                {"params": [p for n, p in self.named_parameters()], "weight_decay": 0.0},
            ]

        if self.deepspeed_offload:
            return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False)
        return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False)
        # return ZeroOneAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, weight_decay=0, amsgrad=False, cuda_aware=False)

    @property
    def deepspeed_offload(self) -> bool:
        strategy = self.trainer.strategy
        if isinstance(strategy, DeepSpeedStrategy):
            cfg = strategy.config["zero_optimization"]
            return cfg.get("offload_optimizer") or cfg.get("offload_param")
        return False

    @torch.no_grad()
    def generate(
        self,
        max_seq_len,
        *args,
        prompt,
        num_samples = 4,  # sample 4 per prompt and select the one with highest reward
        **kwargs
    ):
        ''' not used during training; for inference only
        '''

        assert prompt.ndim == 1, 'only one prompt allowed at a time for now'
        prompt = repeat(prompt, 'n -> b n', b = num_samples)

        self.actor_critic.eval()
        (
            actions,
            sequences,
            mask,
            prompt_mask,
            action_logits,
            _
        ) = self.actor_critic.generate(
            prompt,
            *args,
            max_seq_len = max_seq_len,
            return_values = False,
            **kwargs
        )

        rewards = self.reward_model(
            sequences,
            prompt_mask = prompt_mask,
            mask = mask,
            sample = True
        )

        best_sequence_index = rewards.topk(1, dim = -1).indices

        best_sequence = sequences[best_sequence_index]
        best_sequence = rearrange(best_sequence, '1 ... -> ...')

        return best_sequence

    def training_step(self, batch, batch_idx):
        sequences, \
        prompt_masks, \
        masks, \
        old_action_probs, \
        old_log_probs, \
        rewards, \
        old_values = batch
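        # one batch of rollouts from make_experience: full prompt+response sequences,
        # masks for the prompt and for valid tokens, the behavior policy's probabilities
        # and log-probabilities, reward-model scores, and the old value estimates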

        # PPO training
        action_masks = ~prompt_masks & masks

        action_logits, values = self.actor_critic(
            sequences,
            mask = action_masks
        )

        action_logits = shift(action_logits, shift=1, dim=-2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
        action_len = old_log_probs.shape[-1]

        action_probs = action_logits.softmax(dim = -1)
        action_log_probs = log_prob(action_probs, sequences)
        action_log_probs = action_log_probs[:, -action_len:]

        # calculate entropies, taking into account which part of the sequence is actually an action

        entropies = masked_entropy(action_probs, mask = action_masks)

        # calculate kl div between old action probs and new ones, taking into account which part of the sequence is action or not

        kl_div_loss = 0.

        if self.args.kl_div_loss_weight > 0:
            kl_div_loss = masked_kl_div(action_probs, old_action_probs, mask = action_masks) * self.args.kl_div_loss_weight

        # handle non-pooled values

        normalize_kwargs = dict()

        if old_values.ndim == 2:
            old_values, values = map(lambda t: shift(t, shift = 1, dim = -2), (old_values, values))

            old_values = old_values[:, -action_len:]
            values = values[:, -action_len:]
            rewards = rearrange(rewards, 'b -> b 1')
            normalize_kwargs = dict(dim = -1, mask = action_masks[:, -action_len:])

        if values.ndim < rewards.ndim:
            values = rearrange(values, '... -> ... 1')

        # calculate clipped surrogate objective, classic PPO loss

        ratios = (action_log_probs - old_log_probs).exp()
        advantages = masked_normalize(rewards - old_values, **normalize_kwargs)
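        # `ratios` is the importance weight pi_new(a|s) / pi_old(a|s) for each stored
        # action token; advantages are the whitened reward-minus-baseline estimates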

        if advantages.ndim == 1:
            advantages = rearrange(advantages, 'b -> b 1')

        surr1 = ratios * advantages
        surr2 = ratios.clamp(1 - self.args.eps_clip, 1 + self.args.eps_clip) * advantages
        policy_loss = - torch.min(surr1, surr2) - self.args.beta_s * entropies
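        # clipped surrogate objective: the loss is the negative of the pessimistic
        # min of the unclipped and clipped ratio * advantage, minus beta_s times the
        # masked entropy term computed above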

        # actor loss (also called the policy loss; this is the loss for the model that will ultimately be used)
        actor_loss = policy_loss.mean() + kl_div_loss

        # critic loss (also called the value loss)
        # update value network separate from policy network
        critic_loss = clipped_value_loss(values, rewards, old_values, self.args.value_clip)
        critic_loss = critic_loss.mean()

        # return a differentiable tensor under the 'loss' key so Lightning's automatic
        # optimization can backpropagate; actor and critic share no parameters, so
        # summing the two losses updates both networks in one step
        return {'loss': actor_loss + critic_loss, 'actor_loss': actor_loss.item(), 'critic_loss': critic_loss.item()}

    def make_experience(self, prompts, eos_token=None, temperature=1):
        ''' generate training data by interacting with the environment
        '''

        device = self.device

        # select a bunch of random states (prompts)
        # and get the action (sampled sequence from rwkv as well as the action probs)
        # also calculate the reward using reward model and store
        # randomly pick one prompt
        rand_prompt_index = randrange(0, len(prompts))
        state = prompts[rand_prompt_index]

        # remove padding from state
        state_mask = state != self.args.pad_value
        state = state[state_mask]

        # get predicted sequence
        # interact with the environment; in what is returned:
        #   actions is the response,
        #   sequence is prompt + response,
        (
            actions,
            sequence,
            mask,
            prompt_mask,
            action_logits,
            value
        ) = self.actor_critic.generate(
            rearrange(state, 'n -> 1 n'),
            max_seq_len = self.args.ctx_len,
            eos_token = eos_token,
            temperature = temperature,
            return_values = True
        )
        action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token

        action_prob = action_logits.softmax(dim = -1)

        action_len = actions.shape[-1]
        action_log_prob = log_prob(action_prob, sequence)
        action_log_prob = action_log_prob[:, -action_len:]
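        # per-token log-probabilities of the sampled response under the current policy;
        # these are replayed as `old_log_probs` in training_step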

        actions = rearrange(actions, '1 ... -> ...')

        # get reward as given by supervised trained reward model
        sequence = torch.cat((state, actions), dim = 0)

        prompt_length = len(state)
        prompt_mask = torch.arange(sequence.shape[-1], device = device) < prompt_length

        sequence = rearrange(sequence, 'n -> 1 n')
        prompt_mask = rearrange(prompt_mask, 'n -> 1 n')
        mask = rearrange(mask, 'n -> 1 n') if exists(mask) else torch.ones(sequence.shape, dtype = torch.bool, device = device)

        reward = self.reward_model(
            sequence,
            prompt_mask = prompt_mask,
            mask = mask,
            sample = True
        )

        return (
            sequence,
            prompt_mask,
            mask,
            action_prob,
            action_log_prob,
            reward,
            value
        )