Greenplum / DeepSpeed
Unverified commit 5d14afd2
Authored by digger yu on Jun 03, 2023; committed via GitHub on Jun 02, 2023
fix typo deepspeed/runtime (#3663)
Co-authored-by: Olatunji Ruwase <olruwase@microsoft.com>
Parent: 460bec46
Showing 3 changed files with 7 additions and 7 deletions (+7 -7):

deepspeed/runtime/zero/partition_parameters.py  +4 -4
deepspeed/runtime/zero/stage3.py                +2 -2
deepspeed/runtime/zero/stage_1_and_2.py         +1 -1
deepspeed/runtime/zero/partition_parameters.py

@@ -800,9 +800,9 @@ class Init(InsertPostInitMethodToModuleSubClasses):
                 f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}'

     def _post_init_method(self, module):
-        #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False)
+        #see_memory_usage(f"Before converting params in {module.__class__.__name__}", force=False)
         print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
-        see_memory_usage(f"Before converting and partitioning parmas in {module.__class__.__name__}", force=False)
+        see_memory_usage(f"Before converting and partitioning params in {module.__class__.__name__}", force=False)
         global param_count
         for name, param in module.named_parameters(recurse=False):

@@ -825,7 +825,7 @@ class Init(InsertPostInitMethodToModuleSubClasses):
             param.partition()
         see_memory_usage(
-            f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}",
+            f"Param count {param_count}. After converting and partitioning params in {module.__class__.__name__}",
             force=False)

     def _convert_to_deepspeed_param(self, param):

@@ -1404,7 +1404,7 @@ class Init(InsertPostInitMethodToModuleSubClasses):
             partition_size = param.ds_tensor.ds_numel
             start = self.get_partition_rank() * partition_size
             end = start + partition_size
-            #print_rank_0("REduce scatter was executed for praam {param.ds_id}")
+            #print_rank_0("REduce scatter was executed for param {param.ds_id}")
             if start < param.ds_numel and end > param.ds_numel:
                 elements = param.ds_numel - start
                 param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
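The @@ -1404 hunk only changes a comment, but the surrounding slice arithmetic is easy to miss: each rank owns one padded partition of the flat gradient, and the last rank's slice can run past the real parameter size, so only the valid tail is copied out of the reduce-scatter result. A minimal sketch with toy sizes and illustrative names (not DeepSpeed code):

import torch

# Toy sizes: a 10-element parameter split into 3 padded partitions of 4.
ds_numel = 10                  # true element count of the parameter
partition_size = 4             # padded partition size (3 * 4 >= 10)
rank = 2                       # last rank owns the partially valid slice
grad = torch.zeros(ds_numel)
reduced_partition = torch.ones(partition_size)   # stand-in for the reduce-scatter output

start = rank * partition_size  # 8
end = start + partition_size   # 12, past ds_numel
if start < ds_numel and end > ds_numel:
    elements = ds_numel - start                  # only 2 valid elements remain
    grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))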
deepspeed/runtime/zero/stage3.py

@@ -892,7 +892,7 @@ class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer):
             else:
                 self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements)

-            # Initialize the optimizer states with the flattended fp32 partition.
+            # Initialize the optimizer states with the flattened fp32 partition.
             if not is_adagrad:
                 self._optimizer_step(i)

@@ -906,7 +906,7 @@ class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer):
                 f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param: {swappable_optimizer_subgroup}/{swappable_param_subgroup}',
                 force=False)

-        # Initialize the optimizer states with the flattended fp32 partition.
+        # Initialize the optimizer states with the flattened fp32 partition.
         if is_adagrad:
             self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults)
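Both corrected comments sit next to the Adagrad special case. The reason Adagrad is handled separately is that torch.optim.Adagrad allocates its per-parameter state in its constructor, while most other optimizers create state lazily on the first step(), so stage 3 rebuilds the Adagrad instance only once the flattened fp32 partitions (and their .grad buffers) exist. A small sketch of that difference, using a toy parameter (illustrative only):

import torch

w = torch.nn.Parameter(torch.zeros(4))   # toy parameter, illustrative only

adagrad = torch.optim.Adagrad([w], lr=0.01)
adam = torch.optim.Adam([w], lr=0.01)

# Adagrad fills in per-parameter state ("sum", "step") when it is constructed ...
print(sorted(adagrad.state[w].keys()))   # ['step', 'sum']
# ... whereas Adam's state stays empty until the first call to step().
print(len(adam.state))                   # 0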
deepspeed/runtime/zero/stage_1_and_2.py

@@ -611,7 +611,7 @@ class DeepSpeedZeroOptimizer(ZeROOptimizer):
             self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory(
                 single_grad_partition) if self.cpu_offload else single_grad_partition

-        # Initialize the optimizer states with the flattended fp32 partition.
+        # Initialize the optimizer states with the flattened fp32 partition.
         # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
         # which do lazy initialization of the state at the first call to step.
         if isinstance(self.optimizer, torch.optim.Adagrad):
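The changed comment here sits next to the cpu_offload branch, where the fp32 gradient partition is kept in pinned (page-locked) host memory so that later host-to-device copies can run asynchronously. A plain-PyTorch sketch of the same idea (DeepSpeed routes this through get_accelerator(); this version uses Tensor.pin_memory() and assumes a CUDA-capable build):

import torch

cpu_offload = True
single_grad_partition = torch.zeros(1024, dtype=torch.float32)   # toy partition, illustrative only

# Pinned host memory allows non_blocking=True copies to the accelerator.
grad_buffer = single_grad_partition.pin_memory() if cpu_offload else single_grad_partition
print(grad_buffer.is_pinned())   # True when cpu_offload is enabled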