Commit 860a3263
Authored Jul 03, 2020 by mindspore-ci-bot; committed by Gitee on Jul 03, 2020

!2671 vm for MaxPoolGradGrad, MaxPoolGradGradWithArgmax
Merge pull request !2671 from jiangjinsheng/vm_max_pool_ext2
Parents: 27552eb1, ce4e15b0

Showing 6 changed files with 225 additions and 0 deletions (+225 / -0):

- mindspore/ops/_grad/grad_nn_ops.py (+34 / -0)
- mindspore/ops/_op_impl/tbe/__init__.py (+2 / -0)
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py (+41 / -0)
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py (+41 / -0)
- mindspore/ops/operations/_grad_ops.py (+93 / -0)
- tests/ut/python/ops/test_ops.py (+14 / -0)

mindspore/ops/_grad/grad_nn_ops.py

```python
@@ -146,6 +146,40 @@ def get_bprop_max_pool_with_argmax(self):
    return bprop


@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
    """Grad definition for `MaxPoolGrad` operation."""
    maxpool_grad_grad = G.MaxPoolGradGrad(
        ksize=self.ksize,
        strides=self.strides,
        padding=self.padding)

    def bprop(x1, x2, grad, out, dout):
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)

    return bprop


@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
    """Grad definition for `MaxPoolGradGrad` operation."""
    maxpool_grad = G.MaxPoolGrad(
        ksize=self.ksize,
        strides=self.strides,
        padding=self.padding)

    def bprop(x1, x2, grad, out, dout):
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)

    return bprop


@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
    """Grad definition for `MaxPool` operation."""
```
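
For orientation (an annotation of mine, not part of the diff): `MaxPoolGrad` scatters a pooled gradient back to the winning input positions, and that map is linear in the gradient, so its own bprop only needs to re-route the incoming sensitivity. The zeros returned for `dx1` and `dx2` reflect that only the scatter pattern, not the values of `x1` and `x2`, enters the result. A one-window NumPy sketch of the semantics:

```python
import numpy as np

# One flattened 2x2 pooling window: MaxPool keeps x[k], where k = argmax(x).
x = np.array([0.3, 0.9, 0.1, 0.5], dtype=np.float16)
k = int(np.argmax(x))

def max_pool_grad(g):
    """MaxPoolGrad per window: scatter the pooled gradient g to the winner."""
    dx = np.zeros_like(x)
    dx[k] = g
    return dx

def max_pool_grad_grad(dout):
    """g -> max_pool_grad(g) is linear, so its derivative w.r.t. g gathers the
    incoming sensitivity (shaped like x) at the same winner -- which is what
    MaxPoolGradGrad computes for each window."""
    return dout[k]

dout = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float16)
assert max_pool_grad_grad(dout) == dout[k]
```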

mindspore/ops/_op_impl/tbe/__init__.py

```python
@@ -282,3 +282,5 @@ from .scatter_sub import _scatter_sub_tbe
from .scatter_mul import _scatter_mul_tbe
from .scatter_div import _scatter_div_tbe
from .mod import _mod_tbe
from .max_pool_grad_grad import _max_pool_grad_grad_tbe
from .max_pool_grad_grad_with_argmax import _max_pool_grad_grad_with_argmax_tbe
```

mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py (new file, mode 100644)

```python
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaxPoolGradGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

max_pool_grad_grad_op_info = TBERegOp("MaxPoolGradGrad") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("max_pool_grad_grad.so") \
    .compute_cost(10) \
    .kernel_name("max_pool_grad_grad") \
    .partial_flag(True) \
    .attr("ksize", "required", "listInt", "all") \
    .attr("strides", "required", "listInt", "all") \
    .attr("padding", "required", "str", "all") \
    .attr("data_format", "optional", "str", "all") \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .input(2, "grad", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
    .get_op_info()


@op_info_register(max_pool_grad_grad_op_info)
def _max_pool_grad_grad_tbe():
    """MaxPoolGradGrad TBE register"""
    return
```

mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py (new file, mode 100644)

```python
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaxPoolGradGradWithArgmax op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

max_pool_grad_grad_with_argmax_op_info = TBERegOp("MaxPoolGradGradWithArgmax") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("max_pool_grad_grad_with_argmax.so") \
    .compute_cost(10) \
    .kernel_name("max_pool_grad_grad_with_argmax") \
    .partial_flag(True) \
    .attr("ksize", "required", "listInt", "all") \
    .attr("strides", "required", "listInt", "all") \
    .attr("padding", "required", "str", "all") \
    .input(0, "x", False, "required", "all") \
    .input(1, "grad", False, "required", "all") \
    .input(2, "argmax", False, "optional", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.U16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.I64_5HD, DataType.F16_5HD) \
    .get_op_info()


@op_info_register(max_pool_grad_grad_with_argmax_op_info)
def _max_pool_grad_grad_with_argmax_tbe():
    """MaxPoolGradGradWithArgmax TBE register"""
    return
```
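
An illustration of mine (not from the commit) of why this variant takes precomputed winner indices: with the flat indices that a MaxPoolWithArgmax-style forward pass produces, the second-order gradient reduces to a plain gather, so the kernel never recomputes the winners. The uint16 `dtype_format` row above matches the argmax dtype used in the unit test below; int64 is accepted as an alternative.

```python
import numpy as np

x = np.array([[0.3, 0.9],
              [0.1, 0.5]], dtype=np.float16)     # one 2x2 window of the input
grad = np.array([[0.0, 1.0],
                 [0.0, 0.0]], dtype=np.float16)  # perturbation of x
argmax = np.uint16(np.argmax(x))                 # flat winner index from the forward pass

# Second-order gradient for this window: gather `grad` at the stored winner.
y = grad.reshape(-1)[argmax]
assert y == np.float16(1.0)
```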

mindspore/ops/operations/_grad_ops.py

```python
@@ -536,6 +536,51 @@ class MaxPoolGrad(_PoolGrad):
        return x1_dtype


class MaxPoolGradGrad(_PoolGrad):
    r"""
    Performs gradients of the MaxPoolGrad operation.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel used to take the maximum value:
            an int that applies to both height and width, or a tuple of two ints giving
            height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The stride of the kernel: an int that applies to
            both the height and width of movement, or a tuple of two ints giving the height
            and width of movement respectively. Default: 1.
        padding (str): The pad mode, "same" or "valid", not case sensitive. Default: "valid".

            - same: Adopts the way of completion. Output height and width are the same as
              the input. The total amount of padding is computed for the horizontal and
              vertical directions and distributed evenly to top and bottom, left and right
              where possible; otherwise the last extra padding goes to the bottom and the
              right side.
            - valid: Adopts the way of discarding. The largest possible height and width of
              the output are returned without padding; extra pixels are discarded.

    Inputs:
        - **origin_input** (Tensor) - Tensor with data format "NCHW", data type should be float16.
        - **origin_output** (Tensor) - Data type same as `origin_input`.
        - **grad** (Tensor) - Data type same as `origin_input`.

    Outputs:
        Tensor, with data type same as `origin_input`.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="VALID"):
        super(MaxPoolGradGrad, self).__init__(ksize, strides, padding)

    def infer_shape(self, x1_shape, x2_shape, grad_shape):
        return x1_shape

    def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
        args = {'x1_dtype': x1_dtype, 'x2_dtype': x2_dtype, 'grad_dtype': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16], self.name)
        return x1_dtype


class MaximumGrad(Primitive):
    """Grad for maximum."""
```

```python
@@ -564,6 +609,54 @@ class MaxPoolGradWithArgmax(_PoolGrad):
        return grad_dtype


class MaxPoolGradGradWithArgmax(_PoolGrad):
    r"""
    Computes the gradients of MaxPoolGradWithArgmax.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel used to take the maximum value:
            an int that applies to both height and width, or a tuple of two ints giving
            height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The stride of the kernel: an int that applies to
            both the height and width of movement, or a tuple of two ints giving the height
            and width of movement respectively. Default: 1.
        padding (str): The pad mode, "same" or "valid", not case sensitive. Default: "valid".

            - same: Adopts the way of completion. Output height and width are the same as
              the input. The total amount of padding is computed for the horizontal and
              vertical directions and distributed evenly to top and bottom, left and right
              where possible; otherwise the last extra padding goes to the bottom and the
              right side.
            - valid: Adopts the way of discarding. The largest possible height and width of
              the output are returned without padding; extra pixels are discarded.

    Inputs:
        - **x** (Tensor) - Tensor with data format "NCHW", data type should be float16.
        - **grad** (Tensor) - Data type same as `x`.
        - **argmax** (Tensor) - Data type should be uint16 or int64.

    Outputs:
        Tensor, with data type same as `x`.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="VALID"):
        self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
        super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding)

    def infer_shape(self, x_shape, grad_shape, argmax_shape):
        if not grad_shape:
            raise TypeError("The dout of MaxPoolGradGradWithArgmax should be a Tensor.")
        return x_shape

    def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
        args = {'x_dtype': x_dtype, 'grad_dtype': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16], self.name)
        return grad_dtype


class MinimumGrad(Primitive):
    """Grad for minimum."""
```
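
The inference methods above are plain Python, so the new primitives can be sanity-checked without an Ascend build. A minimal sketch (hypothetical usage, not part of the commit):

```python
from mindspore.ops.operations import _grad_ops as G

op = G.MaxPoolGradGrad(ksize=2, strides=2, padding="VALID")
# As implemented above, the inferred output shape tracks the first input.
assert op.infer_shape([1, 1, 4, 4], [1, 1, 2, 2], [1, 1, 4, 4]) == [1, 1, 4, 4]

op2 = G.MaxPoolGradGradWithArgmax(ksize=2, strides=2, padding="VALID")
# An empty grad shape is rejected explicitly.
try:
    op2.infer_shape([1, 1, 4, 4], [], [1, 1, 2, 2])
except TypeError as err:
    print(err)  # The dout of MaxPoolGradGradWithArgmax should be a Tensor.
```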

tests/ut/python/ops/test_ops.py

```python
@@ -1585,6 +1585,20 @@ test_case_nn_ops = [
        'desc_inputs': [Tensor([0, 1, 2, 3], mstype.int32)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('MaxPoolGradGrad', {
        'block': G.MaxPoolGradGrad(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('MaxPoolGradGradWithArgmax', {
        'block': G.MaxPoolGradGradWithArgmax(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)],
        'desc_bprop': [],
        'skip': ['backward']}),
]

test_case_array_ops = [
```