CSDN 技术社区 / ai / chatCSDN
Commit 60942751: opt reward model
Authored on Mar 10, 2023 by u010280923
Parent: 3859a713
Showing 1 changed file with 58 additions and 4 deletions (+58 -4)
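Summary of the change: this commit replaces the fixed Adam optimizer in RewardModel.configure_optimizers with RWKV-style layer-wise learning-rate groups (scaled rates for the time_mix, time_decay and time_first parameters) and returns a DeepSpeed optimizer instead: DeepSpeedCPUAdam when ZeRO offload is enabled, FusedAdam otherwise.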
src/rlhf/reward.py  (+58 -4)
@@ -10,6 +10,9 @@ from torch import nn
 import torch.nn.functional as F
 import pytorch_lightning as pl
 from pytorch_lightning.utilities import rank_zero_info
+import deepspeed
+from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
+from pytorch_lightning.strategies import DeepSpeedStrategy
 from einops import rearrange, repeat, reduce, pack, unpack
 from einops.layers.torch import Rearrange, Reduce
 
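The three new imports only matter when training runs under Lightning's DeepSpeed strategy. Below is a minimal sketch, not taken from this repo, of a Trainer configuration under which the deepspeed_offload property added in the next hunk returns True, so configure_optimizers selects DeepSpeedCPUAdam; reward_model and train_loader are hypothetical placeholders.

    import pytorch_lightning as pl
    from pytorch_lightning.strategies import DeepSpeedStrategy

    # ZeRO stage 2 with optimizer-state offload to CPU; this puts an
    # "offload_optimizer" entry under the strategy's "zero_optimization"
    # config, which is what the deepspeed_offload property below checks.
    strategy = DeepSpeedStrategy(stage=2, offload_optimizer=True)

    trainer = pl.Trainer(
        accelerator="gpu",
        devices=1,
        precision=16,
        strategy=strategy,
    )
    # trainer.fit(reward_model, train_loader)  # placeholders, not from this commit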
@@ -79,11 +82,62 @@ class RewardModel(pl.LightningModule):
         self.load_state_dict(torch.load(str(path)))
 
     def configure_optimizers(self):
-        # parameters from the paper:
-        optimizer = torch.optim.Adam(self.parameters(), lr=1e-5, betas=(0.9, 0.95))
-        # optimizer = torch.optim.Adam(self.parameters(), lr=self.args.lr_init, betas=self.args.betas)
-        return optimizer
+        args = self.args
+        if args.layerwise_lr > 0:
+            lr_1x = set()
+            lr_2x = set()
+            lr_3x = set()
+            for n, p in self.named_parameters():
+                if "time_mix" in n:
+                    if args.my_pile_stage == 2:
+                        lr_2x.add(n)
+                    else:
+                        lr_1x.add(n)
+                elif "time_decay" in n:
+                    if args.my_pile_stage == 2:
+                        lr_3x.add(n)
+                    else:
+                        lr_2x.add(n)
+                elif "time_first" in n:
+                    lr_3x.add(n)
+                else:
+                    lr_1x.add(n)
+            lr_1x = sorted(list(lr_1x))
+            lr_2x = sorted(list(lr_2x))
+            lr_3x = sorted(list(lr_3x))
+            # print('1x', lr_1x)
+            # print('2x', lr_2x)
+            # print('3x', lr_3x)
+            param_dict = {n: p for n, p in self.named_parameters()}
+            if args.my_pile_stage == 2:
+                optim_groups = [
+                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
+                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},  # test: 2e-3 / args.lr_init
+                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},  # test: 3e-3 / args.lr_init
+                ]
+            else:
+                optim_groups = [
+                    {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0},
+                    {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0},
+                    {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0},
+                ]
+        else:
+            optim_groups = [
+                {"params": [p for n, p in self.named_parameters()], "weight_decay": 0.0},
+            ]
+
+        if self.deepspeed_offload:
+            return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False)
+        return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False)
+        # return ZeroOneAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, weight_decay=0, amsgrad=False, cuda_aware=False)
+
+    @property
+    def deepspeed_offload(self) -> bool:
+        strategy = self.trainer.strategy
+        if isinstance(strategy, DeepSpeedStrategy):
+            cfg = strategy.config["zero_optimization"]
+            return cfg.get("offload_optimizer") or cfg.get("offload_param")
+        return False
 
     def single_forward(
         self,
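Note that my_lr_scale is a custom key: torch.optim and DeepSpeed's Adam variants ignore unknown param-group entries, so the factor has no effect unless the training loop applies it. The sketch below shows how RWKV-style training code typically consumes it; this callback is hypothetical and not part of this commit, and args.lr_init is assumed to be the base rate used above.

    import pytorch_lightning as pl

    class LayerwiseLRCallback(pl.Callback):
        # Multiply each param group's base LR by its "my_lr_scale" factor.
        # Hypothetical sketch: a real setup would also fold warmup/decay
        # scheduling into `lr` before applying the per-group scale.
        def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
            lr = pl_module.args.lr_init
            for group in trainer.optimizers[0].param_groups:
                group["lr"] = lr * group.get("my_lr_scale", 1.0)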