Commit 77725e81 in magicwindyyd/mindspore (fork of MindSpore/mindspore)

Authored Apr 13, 2020 by mindspore-ci-bot; committed via Gitee, Apr 13, 2020.

!258 add_minimum_distributed_op

Merge pull request !258 from lichen/add_minimum_distributed_op

Parents: 5bb4e812, b81cc6ea
Showing 5 changed files with 108 additions and 13 deletions.
mindspore/ccsrc/parallel/dynamic_creator.h                     +1   -0
mindspore/ccsrc/parallel/ops_info/comparison_function_info.h   +8   -0
mindspore/ccsrc/parallel/ops_info/ops_utils.h                  +1   -0
mindspore/ccsrc/parallel/step_auto_parallel.cc                 +1   -0
tests/ut/python/parallel/test_comparison_function_info.py      +97  -13
mindspore/ccsrc/parallel/dynamic_creator.h

@@ -114,6 +114,7 @@ REGISTER(DropoutDoMaskInfo);
 REGISTER(ReshapeInfo);
 REGISTER(FloorDivInfo);
 REGISTER(MaximumInfo);
+REGISTER(MinimumInfo);
 REGISTER(CastInfo);
 REGISTER(GreaterInfo);
 REGISTER(SparseSoftmaxCrossEntropyWithLogitsInfo);
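REGISTER(MinimumInfo) adds the new operator-info class to the factory that the parallel planner uses to look up an operator's distributed implementation by name. As a rough sketch of that registry pattern (illustrative Python, not MindSpore's actual C++ machinery; all names here are hypothetical):

    # Hypothetical registry sketch, not MindSpore code.
    _op_info_creators = {}

    def register(info_cls):
        _op_info_creators[info_cls.__name__] = info_cls

    class MinimumInfo:
        """Stand-in for the C++ operator-info class added by this commit."""

    register(MinimumInfo)  # plays the role of REGISTER(MinimumInfo)

    # Later, an info object can be constructed from the operator's name:
    info = _op_info_creators["MinimumInfo"]()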
mindspore/ccsrc/parallel/ops_info/comparison_function_info.h

@@ -50,6 +50,14 @@ class MaximumInfo : public ArithmeticBase {
       : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
   ~MaximumInfo() override = default;
 };
 
+class MinimumInfo : public ArithmeticBase {
+ public:
+  MinimumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape,
+              const PrimitiveAttrs& attrs)
+      : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
+  ~MinimumInfo() override = default;
+};
+
 }  // namespace parallel
 }  // namespace mindspore
mindspore/ccsrc/parallel/ops_info/ops_utils.h

@@ -186,6 +186,7 @@ constexpr char LOG[] = "Log";
 constexpr char SIGMOID[] = "Sigmoid";
 constexpr char POW[] = "Pow";
 constexpr char MAXIMUM[] = "Maximum";
+constexpr char MINIMUM[] = "Minimum";
 constexpr char EQUAL[] = "Equal";
 constexpr char NOT_EQUAL[] = "NotEqual";
 constexpr char LOGICALNOT[] = "LogicalNot";
mindspore/ccsrc/parallel/step_auto_parallel.cc

@@ -93,6 +93,7 @@ std::vector<std::string> splittable_op_ = {MATMUL,
                                            SIGMOID,
                                            POW,
                                            MAXIMUM,
+                                           MINIMUM,
                                            EQUAL,
                                            NOT_EQUAL,
                                            LOGICALNOT,
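Adding MINIMUM to splittable_op_ in step_auto_parallel.cc presumably lets the auto-parallel pass consider partitioning strategies for Minimum. In the tests below, a strategy tuple such as ((4, 2), (4, 2)) reads as: each input is cut 4 ways along its first dimension and 2 ways along its second, giving 4 * 2 = 8 slices to match device_num=8. A small sketch of the resulting per-device slice shapes (plain Python, for illustration only):

    def shard_shape(shape, strategy):
        # Per-device slice shape when each dimension is cut by the
        # corresponding strategy factor; factors must divide evenly.
        assert all(d % s == 0 for d, s in zip(shape, strategy))
        return [d // s for d, s in zip(shape, strategy)]

    # Minimum's [64, 64] inputs in test_matmul_minimum use strategy (4, 2):
    print(shard_shape([64, 64], (4, 2)))  # -> [16, 32], one slice per device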
tests/ut/python/parallel/test_comparison_function_info.py

@@ -54,11 +54,10 @@ def test_matmul_equal():
             out = self.equal(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -78,11 +77,10 @@ def test_matmul_not_equal():
             out = self.notequal(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -102,11 +100,10 @@ def test_matmul_not_equal_repeated_calculation():
             out = self.notequal(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 1), (4, 1))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -126,11 +123,10 @@ def test_matmul_maximum():
             out = self.maximum(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -150,11 +146,10 @@ def test_matmul_maximum_broadcast():
             out = self.maximum(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 2), (2,))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -174,13 +169,102 @@ def test_matmul_maximum_broadcast2():
             out = self.maximum(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 4), (4, 1))
     strategy2 = ((4, 1), (1, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 1]), dtype=ms.float32)
     b = Tensor(np.ones([1, 64]), dtype=ms.float32)
-    _executor.compile(net, x, y, b)
\ No newline at end of file
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum():
+    class Net(nn.Cell):
+        def __init__(self, strategy1, strategy2):
+            super().__init__()
+            self.matmul = P.MatMul().set_strategy(strategy1)
+            self.minimum = P.Minimum().set_strategy(strategy2)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
+    strategy1 = ((2, 2), (2, 2))
+    strategy2 = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum_broadcast():
+    class Net(nn.Cell):
+        def __init__(self, strategy1, strategy2):
+            super().__init__()
+            self.matmul = P.MatMul().set_strategy(strategy1)
+            self.minimum = P.Maximum().set_strategy(strategy2)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
+    strategy1 = ((2, 2), (2, 2))
+    strategy2 = ((4, 2), (2,))
+    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum_broadcast2():
+    class Net(nn.Cell):
+        def __init__(self, strategy1, strategy2):
+            super().__init__()
+            self.matmul = P.MatMul().set_strategy(strategy1)
+            self.minimum = P.Minimum().set_strategy(strategy2)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
+    strategy1 = ((2, 4), (4, 1))
+    strategy2 = ((4, 1), (1, 2))
+    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 1]), dtype=ms.float32)
+    b = Tensor(np.ones([1, 64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum_auto_parallel():
+    class Net(nn.Cell):
+        def __init__(self):
+            super().__init__()
+            self.matmul = P.MatMul()
+            self.minimum = P.Minimum()
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
+    net = GradWrap(NetWithLoss(Net()))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 1]), dtype=ms.float32)
+    b = Tensor(np.ones([1, 64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
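One detail worth noting in the broadcast tests above: when the second operand of Minimum has fewer dimensions than the first, its strategy tuple also has fewer entries, so strategy2 = ((4, 2), (2,)) shards the 1-D b of shape [64] two ways while the 2-D MatMul output is cut (4, 2). A NumPy illustration of the shapes involved (not the MindSpore API):

    import numpy as np

    out = np.ones((64, 64), dtype=np.float32)  # result of the MatMul
    b1 = np.ones((64,), dtype=np.float32)      # broadcast case, strategy (2,)
    b2 = np.ones((1, 64), dtype=np.float32)    # broadcast2 case, strategy (1, 2)

    print(np.minimum(out, b1).shape)  # (64, 64)
    print(np.minimum(out, b2).shape)  # (64, 64)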