Commit 860a3263 authored by mindspore-ci-bot, committed by Gitee

!2671 vm for MaxPoolGradGrad, MaxPoolGradGradWithArgmax

Merge pull request !2671 from jiangjinsheng/vm_max_pool_ext2
@@ -146,6 +146,40 @@ def get_bprop_max_pool_with_argmax(self):
    return bprop


@bprop_getters.register(G.MaxPoolGrad)
def get_bprop_max_pool_grad_grad(self):
    """Grad definition for `MaxPoolGrad` operation."""
    maxpool_grad_grad = G.MaxPoolGradGrad(
        ksize=self.ksize,
        strides=self.strides,
        padding=self.padding)

    def bprop(x1, x2, grad, out, dout):
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)

    return bprop
@bprop_getters.register(G.MaxPoolGradGrad)
def get_bprop_max_pool_grad_grad_grad(self):
    """Grad definition for `MaxPoolGradGrad` operation."""
    maxpool_grad = G.MaxPoolGrad(
        ksize=self.ksize,
        strides=self.strides,
        padding=self.padding)

    def bprop(x1, x2, grad, out, dout):
        dx1 = zeros_like(x1)
        dx2 = zeros_like(x2)
        dgrad = maxpool_grad(x1, x2, dout)
        return (dx1, dx2, dgrad)

    return bprop


@bprop_getters.register(P.MaxPool)
def get_bprop_max_pool_grad(self):
    """Grad definition for `MaxPool` operation."""
......
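Together these two getters close the loop for higher-order differentiation of max pooling: differentiating MaxPoolGrad dispatches to MaxPoolGradGrad, and differentiating MaxPoolGradGrad dispatches back to MaxPoolGrad. A minimal sketch of how the new bprops might be exercised end to end (the network, shapes, and GradOperation usage are illustrative assumptions, not part of this change, and the GradOperation API varies across MindSpore versions):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P


class MaxPoolNet(nn.Cell):
    """Hypothetical single-layer max-pooling network (illustration only)."""

    def __init__(self):
        super(MaxPoolNet, self).__init__()
        self.pool = P.MaxPool(ksize=2, strides=2, padding="VALID")

    def construct(self, x):
        return self.pool(x)


net = MaxPoolNet()
x = Tensor(np.random.rand(1, 1, 4, 4).astype(np.float16))
# First-order gradient: the backward pass runs MaxPoolGrad.
first_grad = C.GradOperation(get_all=True)(net)
# Second-order gradient: differentiating first_grad invokes the
# MaxPoolGradGrad bprop registered above.
second_grad = C.GradOperation(get_all=True)(first_grad)
ddx = second_grad(x)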
@@ -282,3 +282,5 @@ from .scatter_sub import _scatter_sub_tbe
from .scatter_mul import _scatter_mul_tbe
from .scatter_div import _scatter_div_tbe
from .mod import _mod_tbe
from .max_pool_grad_grad import _max_pool_grad_grad_tbe
from .max_pool_grad_grad_with_argmax import _max_pool_grad_grad_with_argmax_tbe
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaxPoolGradGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
max_pool_grad_grad_op_info = TBERegOp("MaxPoolGradGrad") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("max_pool_grad_grad.so") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \
.input(2, "grad", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
.get_op_info()
@op_info_register(max_pool_grad_grad_op_info)
def _max_pool_grad_grad_tbe():
"""MaxPoolGradGrad TBE register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaxPoolGradGradWithArgmax op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
max_pool_grad_grad_with_argmax_op_info = TBERegOp("MaxPoolGradGradWithArgmax") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("max_pool_grad_grad_with_argmax.so") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "grad", False, "required", "all") \
.input(2, "argmax", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.U16_5HD, DataType.F16_5HD) \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.I64_5HD, DataType.F16_5HD) \
.get_op_info()
@op_info_register(max_pool_grad_grad_with_argmax_op_info)
def _max_pool_grad_grad_with_argmax_tbe():
"""MaxPoolGradGradWithArgmax TBE register"""
return
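A note on the dtype_format rows above: the _5HD suffix denotes Ascend's 5HD (NC1HWC0) data layout, so both kernels operate on float16 feature maps; for MaxPoolGradGradWithArgmax, the argmax indices may be uint16 or int64, matching the two registered format rows.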
@@ -536,6 +536,51 @@ class MaxPoolGrad(_PoolGrad):
        return x1_dtype


class MaxPoolGradGrad(_PoolGrad):
    r"""
    Performs gradients of the MaxPoolGrad operation.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel used to take the maximum value,
            an int number that means the height and width are both ksize, or a tuple
            of two int numbers that represent height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The distance the kernel moves, an int number that means
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        padding (str): The pad mode, either "same" or "valid", not case sensitive.
            Default: "valid".

            - same: Adopts the way of completion. The output height and width are the same as
              the input. The total amount of padding is calculated for the horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible.
              Otherwise, the last extra padding is done from the bottom and the right side.
            - valid: Adopts the way of discarding. The possibly largest height and width of the
              output are returned without padding. Extra pixels are discarded.

    Inputs:
        - **origin_input** (Tensor) - Tensor with data format "NCHW", data type should be float16.
        - **origin_output** (Tensor) - Data type same as `origin_input`.
        - **grad** (Tensor) - Data type same as `origin_input`.

    Outputs:
        Tensor, with data type same as `origin_input`.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="VALID"):
        super(MaxPoolGradGrad, self).__init__(ksize, strides, padding)

    def infer_shape(self, x1_shape, x2_shape, grad_shape):
        return x1_shape

    def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
        args = {'x1_dtype': x1_dtype, 'x2_dtype': x2_dtype, 'grad_dtype': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16], self.name)
        return x1_dtype
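For reference, the primitive can be invoked directly (e.g. in PyNative mode); a minimal sketch mirroring the test case added at the bottom of this change, with illustrative shapes:

import numpy as np
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops.operations import _grad_ops as G

maxpool_grad_grad = G.MaxPoolGradGrad(ksize=1, strides=1, padding="VALID")
origin_input = Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)
origin_output = Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)
grad = Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)
# The result has the same shape and dtype (float16) as origin_input.
out = maxpool_grad_grad(origin_input, origin_output, grad)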
class MaximumGrad(Primitive):
    """Grad for maximum."""
@@ -564,6 +609,54 @@ class MaxPoolGradWithArgmax(_PoolGrad):
        return grad_dtype


class MaxPoolGradGradWithArgmax(_PoolGrad):
    r"""
    Computes the gradients of MaxPoolGradWithArgmax.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel used to take the maximum value,
            an int number that means the height and width are both ksize, or a tuple
            of two int numbers that represent height and width respectively. Default: 1.
        strides (Union[int, tuple[int]]): The distance the kernel moves, an int number that means
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        padding (str): The pad mode, either "same" or "valid", not case sensitive.
            Default: "valid".

            - same: Adopts the way of completion. The output height and width are the same as
              the input. The total amount of padding is calculated for the horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible.
              Otherwise, the last extra padding is done from the bottom and the right side.
            - valid: Adopts the way of discarding. The possibly largest height and width of the
              output are returned without padding. Extra pixels are discarded.

    Inputs:
        - **x** (Tensor) - Tensor with data format "NCHW", data type should be float16.
        - **grad** (Tensor) - Data type same as `x`.
        - **argmax** (Tensor) - Data type should be uint16 or int64.

    Outputs:
        Tensor, with data type same as `x`.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="VALID"):
        self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
        super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding)

    def infer_shape(self, x_shape, grad_shape, argmax_shape):
        if not grad_shape:
            raise TypeError("The dout of MaxPoolGradGradWithArgmax should be a Tensor.")
        return x_shape

    def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
        args = {'x_dtype': x_dtype, 'grad_dtype': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16], self.name)
        return grad_dtype
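Likewise, a minimal direct-invocation sketch for this primitive, mirroring the test case below (the zero-filled argmax is illustrative only):

import numpy as np
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops.operations import _grad_ops as G

op = G.MaxPoolGradGradWithArgmax(ksize=1, strides=1, padding="VALID")
x = Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)
grad = Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)
argmax = Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)  # indices of max positions
# The result has the same shape and dtype (float16) as x.
out = op(x, grad, argmax)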
class MinimumGrad(Primitive):
    """Grad for minimum."""
......
@@ -1585,6 +1585,20 @@ test_case_nn_ops = [
        'desc_inputs': [Tensor([0, 1, 2, 3], mstype.int32)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('MaxPoolGradGrad', {
        'block': G.MaxPoolGradGrad(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('MaxPoolGradGradWithArgmax', {
        'block': G.MaxPoolGradGradWithArgmax(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.random.rand(1, 1, 2, 2), mstype.float16),
                        Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)],
        'desc_bprop': [],
        'skip': ['backward']}),
]
test_case_array_ops = [
......