Commit cf2fc1ce
Authored on Jun 11, 2020 by VectorSL
gpu add notequal greaterequal akg kernel
Parent: c1c683ee

Showing 10 changed files with 317 additions and 2 deletions (+317 −2)
mindspore/_akg/gpu/__init__.py                    +2   −0
mindspore/_akg/gpu/greater_equal.py               +41  −0
mindspore/_akg/gpu/notequal.py                    +41  −0
mindspore/_akg/ops/math/greater_equal.py          +54  −0
mindspore/_akg/ops/math/notequal.py               +54  −0
mindspore/ops/_op_impl/akg/gpu/__init__.py        +2   −0
mindspore/ops/_op_impl/akg/gpu/greater_equal.py   +32  −0
mindspore/ops/_op_impl/akg/gpu/lessequal.py       +2   −2
mindspore/ops/_op_impl/akg/gpu/notequal.py        +32  −0
tests/st/ops/gpu/test_equal_op.py                 +57  −0
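For orientation, the new AKG GPU kernels are reached through the standard MindSpore comparison operators when running on a GPU target, which is exactly how the updated system test at the end of this diff exercises them. A minimal sketch (illustrative only; assumes a GPU build of MindSpore with the AKG backend available):

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

# Target GPU so that the AKG kernels added in this commit can be selected.
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

x = Tensor(np.array([[1.2, 1], [1, 0]]).astype(np.float32))
y = Tensor(np.array([[1, 2]]).astype(np.float32))   # broadcasts against x

print(P.NotEqual()(x, y).asnumpy())       # expected: [[True, True], [False, True]]
print(P.GreaterEqual()(x, y).asnumpy())   # expected: [[True, False], [True, False]]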
mindspore/_akg/gpu/__init__.py

@@ -35,3 +35,5 @@ from .logical_not import LogicalNot, gpu_schedule_LogicalNot
 from .logical_and import LogicalAnd, gpu_schedule_LogicalAnd
 from .sub import Sub, gpu_schedule_Sub
 from .less_equal import LessEqual, gpu_schedule_LessEqual
+from .notequal import NotEqual, gpu_schedule_NotEqual
+from .greater_equal import GreaterEqual, gpu_schedule_GreaterEqual
mindspore/_akg/gpu/greater_equal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""greater_equal"""
import _akg.tvm
from _akg.ops.math import greater_equal
from _akg.topi.generic import schedule_elemwise


def GreaterEqual(x, y):
    """GreaterEqual."""
    return greater_equal.greater_equal(x, y)


def gpu_schedule_GreaterEqual(outs):
    """
    GPU schedule for GreaterEqual.

    Args:
        outs (tvm.tensor.Tensor): Outputs of compute.

    Returns:
        sch (schedule.Schedule): The created schedule.
    """
    device = 'cuda'
    ctx = _akg.tvm.context(device, 0)
    if not ctx.exist:
        raise SystemError("Skip because %s is not enabled" % device)
    with _akg.tvm.target.create(device):
        sch = schedule_elemwise(outs)
    return sch
mindspore/_akg/gpu/notequal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""notequal"""
import _akg.tvm
from _akg.ops.math import notequal
from _akg.topi.generic import schedule_elemwise


def NotEqual(x, y):
    """notequal."""
    return notequal.notequal(x, y)


def gpu_schedule_NotEqual(outs):
    """
    GPU schedule for NotEqual.

    Args:
        outs (tvm.tensor.Tensor): Outputs of compute.

    Returns:
        sch (schedule.Schedule): The created schedule.
    """
    device = 'cuda'
    ctx = _akg.tvm.context(device, 0)
    if not ctx.exist:
        raise SystemError("Skip because %s is not enabled" % device)
    with _akg.tvm.target.create(device):
        sch = schedule_elemwise(outs)
    return sch
mindspore/_akg/ops/math/greater_equal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: greaterequal"""
import _akg.tvm
import _akg.topi
from _akg.utils.dsl_create import produce_shapes
from _akg.utils import validation_check as vc_util


@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor)
def greater_equal(input1, input2):
    """
    Check whether input1 is greater than or equal to input2.

    Args:
        input1 (tvm.tensor.Tensor): Tensor.
        input2 (tvm.tensor.Tensor): Tensor.

    Returns:
        tvm.tensor.Tensor. True where input1 is greater than or equal to input2, False elsewhere.
    """
    shape1 = [x.value for x in input1.shape]
    shape2 = [x.value for x in input2.shape]
    vc_util.check_shape(shape1)
    vc_util.check_shape(shape2)

    shape1, shape2, shape = produce_shapes(shape1, shape2)

    vc_util.elemwise_dtype_check(input1.dtype, input2.dtype)
    dtype = input1.dtype

    # get greater_equal compute: select 1 where the condition holds, 0 otherwise
    t_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(1, dtype), "T")
    f_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(0, dtype), "F")

    input1_bro = _akg.topi.broadcast_to(input1, shape)
    input2_bro = _akg.topi.broadcast_to(input2, shape)
    c_out = _akg.tvm.compute(shape,
                             lambda *indice: _akg.tvm.expr.Select(input1_bro[indice] >= input2_bro[indice],
                                                                  t_value[indice], f_value[indice]),
                             name="C")
    res = _akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res")

    return res
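For reference, the Select-based compute above amounts to a broadcasted elementwise comparison that yields a boolean tensor. A minimal NumPy sketch of the same semantics (illustrative only, not part of this commit), covering both this file and the notequal counterpart that follows:

import numpy as np

# Same inputs as the system test added by this commit; y broadcasts over x.
x = np.array([[1.2, 1.0], [1.0, 0.0]], dtype=np.float32)
y = np.array([[1.0, 2.0]], dtype=np.float32)

greater_equal_ref = np.greater_equal(x, y)   # [[True, False], [True, False]], dtype=bool
not_equal_ref = np.not_equal(x, y)           # [[True, True], [False, True]], dtype=bool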
mindspore/_akg/ops/math/notequal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: notequal"""
import _akg.tvm
import _akg.topi
from _akg.utils.dsl_create import produce_shapes
from _akg.utils import validation_check as vc_util


@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor)
def notequal(input1, input2):
    """
    Check whether input1 is not equal to input2.

    Args:
        input1 (tvm.tensor.Tensor): Tensor.
        input2 (tvm.tensor.Tensor): Tensor.

    Returns:
        tvm.tensor.Tensor. True where input1 is not equal to input2, False elsewhere.
    """
    shape1 = [x.value for x in input1.shape]
    shape2 = [x.value for x in input2.shape]
    vc_util.check_shape(shape1)
    vc_util.check_shape(shape2)

    shape1, shape2, shape = produce_shapes(shape1, shape2)

    vc_util.elemwise_dtype_check(input1.dtype, input2.dtype)
    dtype = input1.dtype

    # get notequal compute: select 1 where the condition holds, 0 otherwise
    t_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(1, dtype), "T")
    f_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(0, dtype), "F")

    input1_bro = _akg.topi.broadcast_to(input1, shape)
    input2_bro = _akg.topi.broadcast_to(input2, shape)
    c_out = _akg.tvm.compute(shape,
                             lambda *indice: _akg.tvm.expr.Select(input1_bro[indice] != input2_bro[indice],
                                                                  t_value[indice], f_value[indice]),
                             name="C")
    res = _akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res")

    return res
mindspore/ops/_op_impl/akg/gpu/__init__.py

@@ -32,3 +32,5 @@ from .logical_and import _logical_and_akg
 from .logical_not import _logical_not_akg
 from .logical_or import _logical_or_akg
 from .lessequal import _lessequal_akg
+from .notequal import _notequal_akg
+from .greater_equal import _greater_equal_akg
mindspore/ops/_op_impl/akg/gpu/greater_equal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GreaterEqual op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

greater_equal_op_info = AkgRegOp("GreaterEqual") \
    .fusion_type("OPAQUE") \
    .input(0, "x") \
    .input(1, "y") \
    .output(0, "output") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(greater_equal_op_info)
def _greater_equal_akg():
    """GreaterEqual register"""
    return
mindspore/ops/_op_impl/akg/gpu/lessequal.py

@@ -15,7 +15,7 @@
 """LessEqual op"""
 from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType
 
-equal_op_info = AkgRegOp("LessEqual") \
+lessequal_op_info = AkgRegOp("LessEqual") \
     .fusion_type("OPAQUE") \
     .input(0, "x") \
     .input(1, "y") \
@@ -26,7 +26,7 @@ equal_op_info = AkgRegOp("LessEqual") \
     .get_op_info()
 
-@op_info_register(equal_op_info)
+@op_info_register(lessequal_op_info)
 def _lessequal_akg():
     """LessEqual register"""
     return
mindspore/ops/_op_impl/akg/gpu/notequal.py  (new file, mode 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NotEqual op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

notequal_op_info = AkgRegOp("NotEqual") \
    .fusion_type("OPAQUE") \
    .input(0, "x") \
    .input(1, "y") \
    .output(0, "output") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(notequal_op_info)
def _notequal_akg():
    """NotEqual AutoDiff register"""
    return
tests/st/ops/gpu/test_equal_op.py

@@ -30,6 +30,21 @@ class NetEqual(Cell):
     def construct(self, x, y):
         return self.Equal(x, y)
 
+class NetNotEqual(Cell):
+    def __init__(self):
+        super(NetNotEqual, self).__init__()
+        self.NotEqual = P.NotEqual()
+
+    def construct(self, x, y):
+        return self.NotEqual(x, y)
+
+class NetGreaterEqual(Cell):
+    def __init__(self):
+        super(NetGreaterEqual, self).__init__()
+        self.GreaterEqual = P.GreaterEqual()
+
+    def construct(self, x, y):
+        return self.GreaterEqual(x, y)
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
@@ -63,3 +78,45 @@ def test_equal():
     output1 = equal(x1, y1)
     assert np.all(output1.asnumpy() == expect1)
     assert output1.shape() == expect1.shape
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_notequal():
+    x0 = Tensor(np.array([[1.2, 1], [1, 0]]).astype(np.float32))
+    y0 = Tensor(np.array([[1, 2]]).astype(np.float32))
+    expect0 = np.array([[True, True], [False, True]])
+
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    notequal = NetNotEqual()
+    output0 = notequal(x0, y0)
+    assert np.all(output0.asnumpy() == expect0)
+    assert output0.shape() == expect0.shape
+
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    notequal = NetNotEqual()
+    output0 = notequal(x0, y0)
+    assert np.all(output0.asnumpy() == expect0)
+    assert output0.shape() == expect0.shape
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_greaterqual():
+    x0 = Tensor(np.array([[1.2, 1], [1, 0]]).astype(np.float32))
+    y0 = Tensor(np.array([[1, 2]]).astype(np.float32))
+    expect0 = np.array([[True, False], [True, False]])
+
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    gequal = NetGreaterEqual()
+    output0 = gequal(x0, y0)
+    assert np.all(output0.asnumpy() == expect0)
+    assert output0.shape() == expect0.shape
+
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    gequal = NetGreaterEqual()
+    output0 = gequal(x0, y0)
+    assert np.all(output0.asnumpy() == expect0)
+    assert output0.shape() == expect0.shape