diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py
index b34d452cbaaa7f86fb0e6832909b73da45d17bf2..51c804dc39b9a62eedb3a0f58a644d3ad3eee30f 100755
--- a/mindspore/ops/_grad/grad_nn_ops.py
+++ b/mindspore/ops/_grad/grad_nn_ops.py
@@ -146,6 +146,40 @@ def get_bprop_max_pool_with_argmax(self):
     return bprop
 
 
+@bprop_getters.register(G.MaxPoolGrad)
+def get_bprop_max_pool_grad_grad(self):
+    """Grad definition for `MaxPoolGrad` operation."""
+    maxpool_grad_grad = G.MaxPoolGradGrad(
+        ksize=self.ksize,
+        strides=self.strides,
+        padding=self.padding)
+
+    def bprop(x1, x2, grad, out, dout):
+        dx1 = zeros_like(x1)
+        dx2 = zeros_like(x2)
+        dgrad = maxpool_grad_grad(x1, x2, dout)
+        return (dx1, dx2, dgrad)
+
+    return bprop
+
+
+@bprop_getters.register(G.MaxPoolGradGrad)
+def get_bprop_max_pool_grad_grad_grad(self):
+    """Grad definition for `MaxPoolGradGrad` operation."""
+    maxpool_grad = G.MaxPoolGrad(
+        ksize=self.ksize,
+        strides=self.strides,
+        padding=self.padding)
+
+    def bprop(x1, x2, grad, out, dout):
+        dx1 = zeros_like(x1)
+        dx2 = zeros_like(x2)
+        dgrad = maxpool_grad(x1, x2, dout)
+        return (dx1, dx2, dgrad)
+
+    return bprop
+
+
 @bprop_getters.register(P.MaxPool)
 def get_bprop_max_pool_grad(self):
     """Grad definition for `MaxPool` operation."""
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index a4f8aa42ed3b32404394a866325db0dfa139f708..35785a085c4e5c69cd170aac7185f655edc2512e 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -282,3 +282,5 @@ from .scatter_sub import _scatter_sub_tbe
 from .scatter_mul import _scatter_mul_tbe
 from .scatter_div import _scatter_div_tbe
 from .mod import _mod_tbe
+from .max_pool_grad_grad import _max_pool_grad_grad_tbe
+from .max_pool_grad_grad_with_argmax import _max_pool_grad_grad_with_argmax_tbe
diff --git a/mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py b/mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdaceb2b0e058736e1bcd1c0e8617cee4d06bf2f
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py
@@ -0,0 +1,41 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""MaxPoolGradGrad op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+max_pool_grad_grad_op_info = TBERegOp("MaxPoolGradGrad") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("max_pool_grad_grad.so") \
+    .compute_cost(10) \
+    .kernel_name("max_pool_grad_grad") \
+    .partial_flag(True) \
+    .attr("ksize", "required", "listInt", "all") \
+    .attr("strides", "required", "listInt", "all") \
+    .attr("padding", "required", "str", "all") \
+    .attr("data_format", "optional", "str", "all") \
+    .input(0, "x1", False, "required", "all") \
+    .input(1, "x2", False, "required", "all") \
+    .input(2, "grad", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
+    .get_op_info()
+
+
+@op_info_register(max_pool_grad_grad_op_info)
+def _max_pool_grad_grad_tbe():
+    """MaxPoolGradGrad TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py b/mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..52a9392f3e51a57d85a854544931fd4e9f906e2d
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py
@@ -0,0 +1,41 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""MaxPoolGradGradWithArgmax op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+max_pool_grad_grad_with_argmax_op_info = TBERegOp("MaxPoolGradGradWithArgmax") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("max_pool_grad_grad_with_argmax.so") \
+    .compute_cost(10) \
+    .kernel_name("max_pool_grad_grad_with_argmax") \
+    .partial_flag(True) \
+    .attr("ksize", "required", "listInt", "all") \
+    .attr("strides", "required", "listInt", "all") \
+    .attr("padding", "required", "str", "all") \
+    .input(0, "x", False, "required", "all") \
+    .input(1, "grad", False, "required", "all") \
+    .input(2, "argmax", False, "optional", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.U16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.I64_5HD, DataType.F16_5HD) \
+    .get_op_info()
+
+
+@op_info_register(max_pool_grad_grad_with_argmax_op_info)
+def _max_pool_grad_grad_with_argmax_tbe():
+    """MaxPoolGradGradWithArgmax TBE register"""
+    return
diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py
index 94ba2f1bd9681efeb0f5c7cb507b47fcf611e2d5..8f9841b1690a63724bfcdb36e6827aec80399b50 100644
--- a/mindspore/ops/operations/_grad_ops.py
+++ b/mindspore/ops/operations/_grad_ops.py
@@ -536,6 +536,51 @@ class MaxPoolGrad(_PoolGrad):
         return x1_dtype
 
 
+class MaxPoolGradGrad(_PoolGrad):
+    r"""
+    Computes the gradients of the MaxPoolGrad operation.
+
+    Args:
+        ksize (Union[int, tuple[int]]): The size of the kernel used to take the maximum value;
+            an int that specifies both the height and width of the kernel, or a tuple of two
+            ints that specify height and width respectively. Default: 1.
+        strides (Union[int, tuple[int]]): The distance the kernel moves; an int that specifies
+            both the height and width of movement, or a tuple of two ints that specify height
+            and width of movement respectively. Default: 1.
+        padding (str): The pad mode, either "same" or "valid", not case sensitive.
+            Default: "valid".
+
+            - same: Adopts the way of completion. Output height and width will be the same as
+              the input. The total amount of padding is calculated for the horizontal and
+              vertical directions and distributed evenly to top and bottom, left and right
+              where possible. Otherwise, the extra padding goes to the bottom and the right.
+
+            - valid: Adopts the way of discarding. The largest possible height and width of
+              the output are returned without padding. Extra pixels are discarded.
+
+    Inputs:
+        - **origin_input** (Tensor) - Tensor with data format "NCHW", data type should be float16.
+        - **origin_output** (Tensor) - Data type same as `origin_input`.
+        - **grad** (Tensor) - Data type same as `origin_input`.
+
+    Outputs:
+        Tensor, with the same data type as `origin_input`.
+ + """ + + @prim_attr_register + def __init__(self, ksize=1, strides=1, padding="VALID"): + super(MaxPoolGradGrad, self).__init__(ksize, strides, padding) + + def infer_shape(self, x1_shape, x2_shape, grad_shape): + return x1_shape + + def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype): + args = {'x1_dtype': x1_dtype, 'x2_dtype': x2_dtype, 'grad_dtype': grad_dtype} + validator.check_tensor_type_same(args, [mstype.float16], self.name) + return x1_dtype + + class MaximumGrad(Primitive): """Grad for maximum.""" @@ -564,6 +609,54 @@ class MaxPoolGradWithArgmax(_PoolGrad): return grad_dtype +class MaxPoolGradGradWithArgmax(_PoolGrad): + r""" + Computes the gradients of MaxPoolGradWithArgmax. + + Args: + ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value, + is an int number that represents height and width are both ksize, or a tuple + of two int numbers that represent height and width respectively. Default: 1. + strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. + padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". + + - same: Adopts the way of completion. Output height and width will be the same as + the input. Total number of padding will be calculated for horizontal and vertical + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. + + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. + + Inputs: + - **x** (Tensor) - Tensor with data format "NCHW", data type should be float16. + - **grad** (Tensor) - Data type same as `x`. + - **argmax** (Tensor) - Data type should be uint16 or int64. + + Outputs: + Tensor, With data type same as `x`. 
+ + """ + + @prim_attr_register + def __init__(self, ksize=1, strides=1, padding="VALID"): + self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output']) + super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding) + + def infer_shape(self, x_shape, grad_shape, argmax_shape): + if not grad_shape: + raise TypeError("The dout of MaxPoolGradGradWithArgmax should be a Tensor.") + return x_shape + + def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype): + args = {'x_dtype': x_dtype, 'grad_dtype': grad_dtype} + validator.check_tensor_type_same(args, [mstype.float16], self.name) + return grad_dtype + + class MinimumGrad(Primitive): """Grad for minimum.""" diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index a99b231fa7062ce6ca554818c43afd71f5234b25..5262145c80bc1ae6e2bd900d8a00a84a29306153 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -1585,6 +1585,20 @@ test_case_nn_ops = [ 'desc_inputs': [Tensor([0, 1, 2, 3], mstype.int32)], 'desc_bprop': [], 'skip': ['backward']}), + ('MaxPoolGradGrad', { + 'block': G.MaxPoolGradGrad(), + 'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16), + Tensor(np.random.rand(1, 1, 2, 2), mstype.float16), + Tensor(np.random.rand(1, 1, 2, 2), mstype.float16)], + 'desc_bprop': [], + 'skip': ['backward']}), + ('MaxPoolGradGradWithArgmax', { + 'block': G.MaxPoolGradGradWithArgmax(), + 'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2), mstype.float16), + Tensor(np.random.rand(1, 1, 2, 2), mstype.float16), + Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)], + 'desc_bprop': [], + 'skip': ['backward']}), ] test_case_array_ops = [