Repository: mindspore (forked from MindSpore / mindspore)

Commit 51796aa6
Authored on July 25, 2020 by lirongzhen1

fix sparse feature bug for auto parallel

Parent: 7fbed0ce
Showing 2 changed files with 21 additions and 15 deletions (+21 -15):

    mindspore/ops/_grad/grad_comm_ops.py                      +2   -2
    tests/ut/python/parallel/test_sparse_feature_bprop.py     +19  -13
mindspore/ops/_grad/grad_comm_ops.py
@@ -200,14 +200,14 @@ def get_bprop_mirror_operator(self):
                 float_one = F.scalar_cast(1.0, F.dtype(grad))
                 num = F.scalar_cast(dev_num, F.dtype(grad))
                 grad = mul(grad, cast(F.scalar_to_array(float_one/num), F.dtype(grad)))
-                dx = (indices, grad, dout.dense_shape())
+                dx = IndexedSlices(indices, grad, dout.dense_shape())
         else:
             if F.issubclass_(F.typeof(dout), mstype.tensor):
                 dx = all_reduce(dout)
             else:
                 indices = all_gather(dout.indices())
                 grad = all_gather(dout.values())
-                dx = (indices, grad, dout.dense_shape())
+                dx = IndexedSlices(indices, grad, dout.dense_shape())
 
         return (dx,)
     return bprop
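For reference, the sparse branch in this hunk all-gathers the gradient row indices and values from every device in the mirror group, rescales by 1.0/dev_num when mean reduction is enabled, and now wraps the result in IndexedSlices instead of returning a bare (indices, grad, dense_shape) tuple, so downstream code sees a proper sparse-gradient type. A minimal NumPy sketch of the same arithmetic, assuming a two-device group; SparseGrad, mirror_bprop_sparse and per_device_grads are illustrative stand-ins, not MindSpore APIs:

import numpy as np
from collections import namedtuple

# Hypothetical stand-in for IndexedSlices: (indices, values, dense_shape).
SparseGrad = namedtuple("SparseGrad", ["indices", "values", "dense_shape"])

def mirror_bprop_sparse(per_device_grads, dense_shape, dev_num, mean_flag=True):
    # Mimics all_gather(dout.indices()) / all_gather(dout.values()):
    # concatenate the row indices and gradient rows contributed by each device.
    indices = np.concatenate([idx for idx, _ in per_device_grads])
    values = np.concatenate([val for _, val in per_device_grads])
    if mean_flag:
        # grad = mul(grad, float_one / num): average the gradient instead of summing.
        values = values * (1.0 / dev_num)
    # Before this commit: dx = (indices, values, dense_shape) -- a bare tuple.
    # After: the same three fields packaged as an IndexedSlices-like object.
    return SparseGrad(indices, values, dense_shape)

# Two devices each contribute gradient rows of an 8 x 4 embedding table.
g0 = (np.array([0, 3]), np.ones((2, 4), dtype=np.float32))
g1 = (np.array([3, 5]), np.ones((2, 4), dtype=np.float32))
dx = mirror_bprop_sparse([g0, g1], dense_shape=(8, 4), dev_num=2)
print(dx.indices, dx.values[0])  # [0 3 3 5] [0.5 0.5 0.5 0.5]

Duplicate indices (row 3 above) are kept as repeated slices, mirroring what AllGather produces; combining them is left to whatever consumes the IndexedSlices.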
tests/ut/python/parallel/test_sparse_feature_bprop.py
@@ -21,9 +21,8 @@ from mindspore import context
 from mindspore.common.parameter import Parameter
 from mindspore.common.tensor import Tensor
 from mindspore.ops import composite as C, operations as P
-from mindspore.ops.operations.comm_ops import AllReduce, _MirrorOperator
+from mindspore.ops.operations.comm_ops import AllReduce
 from mindspore.common.api import _executor
-from mindspore.communication.management import HCCL_WORLD_COMM_GROUP
 from mindspore.nn import TrainOneStepCell, Adam

@@ -60,30 +59,37 @@ def test_bprop_with_sparse_feature_allreduce():
     _executor.compile(net, x)
 
 
 def test_bprop_with_sparse_feature_mirror():
-    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="hybrid_parallel")
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     context.set_context(enable_sparse=True)
 
     class Net(nn.Cell):
-        def __init__(self, axis=0, shape=None):
+        def __init__(self, shape=None):
             super(Net, self).__init__()
             if shape is None:
                 shape = [8, 8]
-            self.mirror = _MirrorOperator(group=HCCL_WORLD_COMM_GROUP)
-            self.gatherv2 = P.SparseGatherV2()
+            weight = Tensor(np.ones([64, 64]), dtype=ms.float32)
+            self.weight = Parameter(weight, "w")
             self.index = Tensor(np.ones(shape), dtype=ms.int32)
-            self.axis = axis
+            self.embeddinglookup = nn.EmbeddingLookup()
+            self.embeddinglookup.embeddinglookup.set_strategy(((1, 1), (8, 1)))
 
-        def construct(self, x):
-            out = self.mirror(x)
-            out = self.gatherv2(out, self.index, self.axis)
+        def construct(self, x, b):
+            out = self.embeddinglookup(self.weight, self.index)
             return out
 
-    net = GradWrap(Net())
-    x = Tensor(np.ones([64, 64]), dtype=ms.float32)
+    _x = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
+    _b = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
 
-    _executor.compile(net, x)
+    def compile_net(net):
+        optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
+        train_net = TrainOneStepCell(net, optimizer)
+        _executor.compile(train_net, _x, _b)
+
+    net = Net()
+    compile_net(net)
 
 
 def test_bprop_with_sparse_feature_dataparallel():
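The reworked test no longer drives _MirrorOperator directly; it exercises the same sparse-gradient path through nn.EmbeddingLookup with its lookup sharded 8 ways on the row dimension, compiled under TrainOneStepCell with an Adam optimizer. The forward computation under test is just a row gather from the weight table, which is what makes the backward gradient row-sparse. A small NumPy sketch of that gather using the shapes from the test; the helper name embedding_lookup is made up for illustration:

import numpy as np

def embedding_lookup(weight, index):
    # Row gather: out[..., :] = weight[index[...], :].
    # Only the looked-up rows of `weight` receive gradient in the backward
    # pass, hence the IndexedSlices handling fixed in grad_comm_ops.py.
    return weight[index]

weight = np.ones((64, 64), dtype=np.float32)  # matches Parameter(weight, "w")
index = np.ones((8, 8), dtype=np.int32)       # matches Tensor(np.ones(shape), ms.int32)
out = embedding_lookup(weight, index)
print(out.shape)  # (8, 8, 64)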