magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 7fbaf2f6
Authored Apr 21, 2020 by mindspore-ci-bot; committed via Gitee on Apr 21, 2020

!546 GPU add testcase for maximum logical
Merge pull request !546 from VectorSL/add_test_new

Parents: cbdc23d7, 4740c70f

Showing 7 changed files with 210 additions and 0 deletions (+210 -0)
mindspore/_akg/gpu/__init__.py               +5   -0
mindspore/nn/wrap/loss_scale.py              +3   -0
mindspore/ops/_op_impl/akg/gpu/__init__.py   +5   -0
mindspore/ops/operations/math_ops.py         +1   -0
tests/st/ops/gpu/test_lessequal_op.py        +49  -0
tests/st/ops/gpu/test_logical_op.py          +92  -0
tests/st/ops/gpu/test_maximum_op.py          +55  -0
mindspore/_akg/gpu/__init__.py

@@ -30,3 +30,8 @@ from .hsigmoid import HSigmoid, gpu_schedule_HSigmoid
 from .hsigmoid_grad import HSigmoidGrad, gpu_schedule_HSigmoidGrad
 from .hswish import HSwish, gpu_schedule_HSwish
 from .hswish_grad import HSwishGrad, gpu_schedule_HSwishGrad
+from .logical_or import LogicalOr, gpu_schedule_LogicalOr
+from .logical_not import LogicalNot, gpu_schedule_LogicalNot
+from .logical_and import LogicalAnd, gpu_schedule_LogicalAnd
+from .sub import Sub, gpu_schedule_Sub
+from .less_equal import LessEqual, gpu_schedule_LessEqual
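The five added imports only re-export each new GPU kernel together with its schedule from the mindspore/_akg/gpu package. A minimal smoke check for that wiring is sketched below; it is not part of this commit and assumes a checkout or install in which mindspore._akg is importable.

# Import smoke check (sketch, not part of this commit): confirm each newly
# wired op/schedule pair is exposed by mindspore._akg.gpu.
import mindspore._akg.gpu as akg_gpu

for name in ("LogicalOr", "gpu_schedule_LogicalOr",
             "LogicalNot", "gpu_schedule_LogicalNot",
             "LogicalAnd", "gpu_schedule_LogicalAnd",
             "Sub", "gpu_schedule_Sub",
             "LessEqual", "gpu_schedule_LessEqual"):
    assert hasattr(akg_gpu, name), "missing symbol: " + name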
mindspore/nn/wrap/loss_scale.py

@@ -209,6 +209,7 @@ class TrainOneStepWithLossScaleCell(Cell):
             self.gpu_target = True
             self.float_status = P.FloatStatus()
             self.addn = P.AddN()
+            self.reshape = P.Reshape()
         else:
             self.gpu_target = False
             self.alloc_status = NPUAllocFloatStatus()
@@ -260,6 +261,8 @@ class TrainOneStepWithLossScaleCell(Cell):
         else:
             flag_sum = self.hyper_map(F.partial(_grad_overflow), grads)
             flag_sum = self.addn(flag_sum)
+            # convert flag_sum to scalar
+            flag_sum = self.reshape(flag_sum, (()))
         if self.is_distributed:
             # sum overflow flag over devices
             flag_reduce = self.allreduce(flag_sum)
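The three added lines extend the GPU branch of TrainOneStepWithLossScaleCell: a Reshape primitive is created alongside FloatStatus and AddN, and the summed overflow flag is reshaped to a scalar (shape ()) before the overflow decision. The sketch below isolates just that aggregation step outside the cell; it is illustrative only and assumes a GPU-enabled MindSpore build (FloatStatus is the GPU-side overflow probe and, to my understanding, yields a shape-(1,) flag per input tensor).

# Illustrative sketch (not part of this commit) of the aggregation the added
# lines extend: one FloatStatus flag per gradient, summed with AddN, then
# reshaped from shape (1,) to a scalar. Assumes a GPU-enabled MindSpore build.
import numpy as np
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

float_status = P.FloatStatus()
addn = P.AddN()
reshape = P.Reshape()

grads = (Tensor(np.array([1.0, np.inf]).astype(np.float32)),
         Tensor(np.array([2.0, 3.0]).astype(np.float32)))
flags = [float_status(g) for g in grads]   # each flag has shape (1,)
flag_sum = addn(flags)                     # still shape (1,)
flag_sum = reshape(flag_sum, (()))         # now a scalar, as in the diff above
print(flag_sum)                            # the inf should make this non-zero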
mindspore/ops/_op_impl/akg/gpu/__init__.py

@@ -27,3 +27,8 @@ from .hsigmoid import _hsigmoid_akg
 from .hsigmoid_grad import _hsigmoid_grad_akg
 from .hswish import _hswish_akg
 from .hswish_grad import _hswish_grad_akg
+from .sub import _sub_akg
+from .logical_and import _logical_and_akg
+from .logical_not import _logical_not_akg
+from .logical_or import _logical_or_akg
+from .lessequal import _lessequal_akg
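This second __init__.py plays the matching role on the operator-registration side: the five added imports publish the akg GPU op-info entries (_sub_akg, _logical_*_akg, _lessequal_akg), which, as the package path suggests, back GPU kernel selection for the corresponding primitives. As with the kernel package above, a hedged smoke check is simply that the names resolve (assuming mindspore.ops._op_impl is importable in your environment).

# Registration smoke check (sketch, not part of this commit).
import mindspore.ops._op_impl.akg.gpu as akg_gpu_ops

for name in ("_sub_akg", "_logical_and_akg", "_logical_not_akg",
             "_logical_or_akg", "_lessequal_akg"):
    assert hasattr(akg_gpu_ops, name), "missing registration: " + name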
mindspore/ops/operations/math_ops.py

@@ -1495,6 +1495,7 @@ class LogicalNot(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """init LogicalNot"""
+        self.init_prim_io_names(inputs=['x'], outputs=['output'])

     def infer_shape(self, x_shape):
         return x_shape
tests/st/ops/gpu/test_lessequal_op.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.lessequal = P.LessEqual()

    def construct(self, x, y):
        return self.lessequal(x, y)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_lessequal():
    x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
    y = Tensor(np.array([[2]]).astype(np.float32))
    expect = [[True, True, False]]

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    lessequal = Net()
    output = lessequal(x, y)
    assert np.all(output.asnumpy() == expect)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    lessequal = Net()
    output = lessequal(x, y)
    assert np.all(output.asnumpy() == expect)
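test_lessequal_op.py wraps P.LessEqual in a one-primitive Cell and checks the broadcast comparison of a [1, 3] tensor against a [1, 1] tensor in both PyNative and graph mode. The hard-coded expect value matches the NumPy reference below (a sketch, not part of the commit).

# NumPy reference for the case exercised above (illustrative only).
import numpy as np

x = np.array([[1, 2, 3]]).astype(np.float32)
y = np.array([[2]]).astype(np.float32)
print(np.less_equal(x, y))   # [[ True  True False]] -- same as `expect`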
tests/st/ops/gpu/test_logical_op.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class NetAnd(Cell):
    def __init__(self):
        super(NetAnd, self).__init__()
        self.logicaland = P.LogicalAnd()

    def construct(self, x, y):
        return self.logicaland(x, y)


class NetOr(Cell):
    def __init__(self):
        super(NetOr, self).__init__()
        self.logicalor = P.LogicalOr()

    def construct(self, x, y):
        return self.logicalor(x, y)


class NetNot(Cell):
    def __init__(self):
        super(NetNot, self).__init__()
        self.logicalnot = P.LogicalNot()

    def construct(self, x):
        return self.logicalnot(x)


x = np.array([True, False, False]).astype(np.bool)
y = np.array([False]).astype(np.bool)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicaland():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicaland = NetAnd()
    output = logicaland(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_and(x, y))

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicaland = NetAnd()
    output = logicaland(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_and(x, y))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicalor():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicalor = NetOr()
    output = logicalor(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_or(x, y))

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicalor = NetOr()
    output = logicalor(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == np.logical_or(x, y))


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_logicalnot():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    logicalnot = NetNot()
    output = logicalnot(Tensor(x))
    assert np.all(output.asnumpy() == np.logical_not(x))

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    logicalnot = NetNot()
    output = logicalnot(Tensor(x))
    assert np.all(output.asnumpy() == np.logical_not(x))
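Each of the three tests compares the GPU kernel against the matching NumPy reference (np.logical_and / np.logical_or / np.logical_not) on broadcast boolean inputs, once in PyNative mode and once in graph mode. One portability note, not part of the commit: np.bool, used for the test inputs, is just an alias of Python's bool; it was deprecated in NumPy 1.20 and removed in 1.24, so on newer NumPy the same inputs would be built with np.bool_ or plain bool.

# Equivalent input construction on newer NumPy (sketch, not part of the commit).
import numpy as np

x = np.array([True, False, False]).astype(np.bool_)
y = np.array([False]).astype(np.bool_)
print(np.logical_and(x, y), np.logical_or(x, y), np.logical_not(x))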
tests/st/ops/gpu/test_maximum_op.py (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
import mindspore.context as context
import numpy as np


class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.max = P.Maximum()

    def construct(self, x, y):
        return self.max(x, y)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max():
    x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))
    y = Tensor(np.array([[2]]).astype(np.float32))
    expect = [[2, 2, 3]]
    error = np.ones(shape=[1, 3]) * 1.0e-5

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    max = Net()
    output = max(x, y)
    diff = output.asnumpy() - expect
    assert np.all(diff < error)
    assert np.all(-diff < error)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    max = Net()
    output = max(x, y)
    diff = output.asnumpy() - expect
    assert np.all(diff < error)
    assert np.all(-diff < error)
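test_maximum_op.py checks the broadcast Maximum of a [1, 3] tensor against a [1, 1] tensor, again in both execution modes. The paired assertions diff < error and -diff < error together enforce |output - expect| < 1e-5 elementwise; the sketch below shows the NumPy reference and a comparable absolute-tolerance check (illustrative only, not part of the commit).

# NumPy reference and a comparable tolerance check (illustrative only).
import numpy as np

x = np.array([[1, 2, 3]]).astype(np.float32)
y = np.array([[2]]).astype(np.float32)
expect = np.array([[2, 2, 3]], dtype=np.float32)
np.testing.assert_allclose(np.maximum(x, y), expect, rtol=0, atol=1.0e-5)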