From 42eb8b08c225410e66f25bcb5b2c196ca16a3b71 Mon Sep 17 00:00:00 2001
From: buxue
Date: Sun, 26 Apr 2020 15:25:26 +0800
Subject: [PATCH] fix bugs of Acosh, TopK, ResizeNearestNeighbor, DepthwiseConv2dNative

---
 mindspore/ccsrc/transform/util.cc     | 19 ++++++++-----------
 mindspore/nn/optim/momentum.py        |  2 +-
 mindspore/ops/operations/array_ops.py |  5 +++++
 mindspore/ops/operations/math_ops.py  |  3 ++-
 mindspore/ops/operations/nn_ops.py    | 23 ++++++++++++++---------
 tests/ut/python/ops/test_ops.py       |  4 ++--
 6 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/mindspore/ccsrc/transform/util.cc b/mindspore/ccsrc/transform/util.cc
index b1120ade6..3f856fe56 100644
--- a/mindspore/ccsrc/transform/util.cc
+++ b/mindspore/ccsrc/transform/util.cc
@@ -171,20 +171,17 @@ GeTensorPtr TransformUtil::ConvertTensor(const MeTensorPtr &tensor, const std::s
     MS_LOG(ERROR) << "The Me Tensor data type size is wrong, type size is: " << type_size;
     return nullptr;
   }
-  // get tensor buff size
-  size_t data_buff_size = 0;
   size_t elements_num = IntToSize(tensor->ElementsNum());
-  if (elements_num > 0 && type_size > 0 && UINT_MAX / type_size >= elements_num) {
-    data_buff_size = elements_num * type_size;
+  if (UINT_MAX / type_size < elements_num) {
+    MS_LOG(ERROR) << "The required Me Tensor data buff size " << elements_num << " x " << type_size
+                  << " overflowed UINT_MAX: " << UINT_MAX << ".";
+    return nullptr;
   }
+
+  // get tensor buff size
+  size_t data_buff_size = elements_num * type_size;
   if (data_buff_size == 0) {
-    if (elements_num > 0 && type_size > 0 && UINT_MAX / type_size < elements_num) {
-      MS_LOG(ERROR) << "The required Me Tensor data buff size " << elements_num << " x " << type_size
-                    << " overflowed UINT_MAX: " << UINT_MAX << ".";
-    } else {
-      MS_LOG(ERROR) << "The Me Tensor data buff size is 0.";
-    }
-    return nullptr;
+    MS_LOG(INFO) << "The Me Tensor data buff size is 0.";
   }
   // create ge tensor
   auto desc = GetGeTensorDesc(tensor->shape_c(), tensor->data_type(), format);
diff --git a/mindspore/nn/optim/momentum.py b/mindspore/nn/optim/momentum.py
index c69e226df..67de590c5 100755
--- a/mindspore/nn/optim/momentum.py
+++ b/mindspore/nn/optim/momentum.py
@@ -56,7 +56,7 @@ class Momentum(Optimizer):
         - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.

     Outputs:
-        Tensor[bool], the value is True.
+        tuple[bool], all elements are True.

     Raises:
         ValueError: If the momentum is less than 0.0.
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index a97cf5e43..22d8bfd46 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -1910,6 +1910,11 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self, size, align_corners=False):
         """Init ResizeNearestNeighbor"""
+        validator.check_value_type("size", size, [tuple, list], self.name)
+        validator.check_value_type("align_corners", align_corners, [bool], self.name)
+        validator.check_integer("length of size", len(size), 2, Rel.EQ, self.name)
+        for i, value in enumerate(size):
+            validator.check_integer(f'{i}th value of size', value, 0, Rel.GE, self.name)
         self.init_prim_io_names(inputs=['image_in'], outputs=['image_out'])

     def infer_shape(self, x):
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 8de410843..1dfe93136 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1251,7 +1251,8 @@ class Acosh(PrimitiveWithInfer):
     Compute inverse hyperbolic cosine of x element-wise.

     Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`,
+          the data type of `input_x` should be a number type, and each element should be greater than or equal to 1.

     Outputs:
         Tensor, has the same shape as `input_x`.
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index c0e5087b8..1e3254662 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -753,8 +753,15 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
         self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
         self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
         self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
+        if self.stride[0] != self.stride[1]:
+            raise ValueError("The height and width of stride should be equal, "
+                             f"but got height:{self.stride[0]}, width:{self.stride[1]}")
         self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
+
         self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
+        if self.dilation[0] != self.dilation[1]:
+            raise ValueError("The height and width of dilation should be equal, "
+                             f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
         self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
         validator.check_value_type('pad', pad, (int,), self.name)
         self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
@@ -771,13 +778,11 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
         validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
         validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)

-        kernel_size_h = w_shape[2]
-        kernel_size_w = w_shape[3]
-        stride_h = self.stride[2]
-        stride_w = self.stride[3]
-        dilation_h = self.dilation[2]
-        dilation_w = self.dilation[3]
-
+        kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
+        _, _, stride_h, stride_w = self.stride
+        _, _, dilation_h, dilation_w = self.dilation
+        if kernel_size_n != 1:
+            raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
         if self.pad_mode == "valid":
             h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
             w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
@@ -1198,8 +1203,8 @@ class TopK(PrimitiveWithInfer):
         >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
         >>> k = 3
         >>> values, indices = topk(input_x, k)
-        >>> assert values == Tensor(np.array([5, 4, 3]))
-        >>> assert indices == Tensor(np.array([4, 3, 2]))
+        >>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)
+        >>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)
     """

     @prim_attr_register
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index d6622e76f..bd1ce1582 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -793,8 +793,8 @@ test_case_nn_ops = [
         'desc_bprop': [[5, 5]]}),
     ('DepthwiseConv2dNative_1', {
         'block': P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2),
-        'desc_inputs': [[10, 32, 32, 32], [3, 32, 3, 3]],
-        'desc_bprop': [[10, 30, 16, 16]]}),
+        'desc_inputs': [[10, 32, 32, 32], [1, 32, 3, 3]],
+        'desc_bprop': [[10, 32, 16, 16]]}),
     ('DepthwiseConv2dNative_2', {
         'block': P.DepthwiseConv2dNative(1, (3, 3), pad_mode="same", pad=0, stride=1),
         'desc_inputs': [[2592, 2048, 4, 4], [1, 2048, 3, 3]],
--
GitLab