diff --git a/tests/st/ops/ascend/test_add.py b/tests/st/ops/ascend/test_add.py
index 0fa8e2d204da9fc3304c18389bdd3d0f655b3ec9..6a07bb879ffa8956ac0339288367ce49786f9f7f 100644
--- a/tests/st/ops/ascend/test_add.py
+++ b/tests/st/ops/ascend/test_add.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()

-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)


 x = np.ones([1, 3, 3, 4]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_biasAddGrad.py b/tests/st/ops/ascend/test_biasAddGrad.py
index 5d1e9668648d5e956a96d5482f474fe1df7409e2..e01a2bbd5b961a6c300eb3fc591c57243565ccc0 100644
--- a/tests/st/ops/ascend/test_biasAddGrad.py
+++ b/tests/st/ops/ascend/test_biasAddGrad.py
@@ -31,8 +31,8 @@ class Net(nn.Cell):
     #     'normal', [2, 3, 3, 4]), name='dout')

     @ms_function
-    def construct(self, dout):
-        return self.bias_add_grad(dout)
+    def construct(self, dout_):
+        return self.bias_add_grad(dout_)


 dout = np.ones([2, 3, 4, 4]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_conv2dGradFilter.py b/tests/st/ops/ascend/test_conv2dGradFilter.py
index 96c878bd467e7ff9fc392cb23fa4514e2c6618bb..928f3cccf23bdac1852e6f2705c210a8b24734d9 100644
--- a/tests/st/ops/ascend/test_conv2dGradFilter.py
+++ b/tests/st/ops/ascend/test_conv2dGradFilter.py
@@ -34,8 +34,8 @@ class Net(nn.Cell):
         self.get_shape = P.Shape()

     @ms_function
-    def construct(self, x, out):
-        return self.conv2d_grad(out, x, self.get_shape(self.y))
+    def construct(self, x_, out_):
+        return self.conv2d_grad(out_, x_, self.get_shape(self.y))


 x = Tensor(np.array([[[
diff --git a/tests/st/ops/ascend/test_drop_out_gen_mask.py b/tests/st/ops/ascend/test_drop_out_gen_mask.py
index 64360a6e24db4ad3f1832c0b3b9951646030afce..6771a3a68b03b7eb930cce473f0630f923750c1b 100644
--- a/tests/st/ops/ascend/test_drop_out_gen_mask.py
+++ b/tests/st/ops/ascend/test_drop_out_gen_mask.py
@@ -29,9 +29,9 @@ class Net(nn.Cell):
         self.mask = P.DropoutGenMask(10, 28)
         self.shape = P.Shape()

-    def construct(self, x, y):
-        shape_x = self.shape(x)
-        return self.mask(shape_x, y)
+    def construct(self, x_, y_):
+        shape_x = self.shape(x_)
+        return self.mask(shape_x, y_)


 x = np.ones([2, 4, 2, 2]).astype(np.int32)
diff --git a/tests/st/ops/ascend/test_equal_count.py b/tests/st/ops/ascend/test_equal_count.py
index a204a48c6b3b579cf198e321d92ac3ece599e8f6..bd73d8745e68ba2f419da2081663cb40171152d4 100644
--- a/tests/st/ops/ascend/test_equal_count.py
+++ b/tests/st/ops/ascend/test_equal_count.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.equal_count = P.EqualCount()

-    def construct(self, x, y):
-        return self.equal_count(x, y)
+    def construct(self, x_, y_):
+        return self.equal_count(x_, y_)


 x = np.random.randn(32).astype(np.int32)
diff --git a/tests/st/ops/ascend/test_matmul.py b/tests/st/ops/ascend/test_matmul.py
index 3981e59f74abb049b7838ad811eaf217c6db4b7f..02d216162af8eb36bc4d569dc83cc84d1bdc0ee3 100644
--- a/tests/st/ops/ascend/test_matmul.py
+++ b/tests/st/ops/ascend/test_matmul.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()

     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)


 x1 = np.random.randn(1, 3).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax.py b/tests/st/ops/ascend/test_maxpool_with_argmax.py
index efb9a16234b62758cbd4f402573ba755dc379b88..e87748939b5026c20718153b8e2dd6b1e39a74be 100644
--- a/tests/st/ops/ascend/test_maxpool_with_argmax.py
+++ b/tests/st/ops/ascend/test_maxpool_with_argmax.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
-
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore.common.api import ms_function
diff --git a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
index 3f47e03fbd4f49d7e794c0059554fd35182fec6b..738201f3d83cf9396e958b038b23e7086c4258bc 100644
--- a/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
+++ b/tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
@@ -63,7 +63,7 @@ def test_net():
     expect = loss_np
     SparseSoftmaxCrossEntropyWithLogits = Net()
     loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels))
-    '''assert'''
+# assert
     assert np.allclose(expect.flatten(), loss_me.asnumpy().flatten(), 0.01, 0.01)
     print(loss_me.asnumpy().flatten())
     print("-------------------------")
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_add.py b/tests/st/ops/ascend/test_tbe_ops/test_add.py
index af33e9c003a016275be918016e6e0851e7884e86..bdf03da9436824fff8a502b4aac2abf8bf1ffad2 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_add.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_add.py
@@ -25,8 +25,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.add = P.TensorAdd()

-    def construct(self, x, y):
-        return self.add(x, y)
+    def construct(self, x_, y_):
+        return self.add(x_, y_)


 x = np.random.randn(1, 3, 3, 4).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
index 3d112d245182dd661b11b703ca6772795743310e..a98f2a5371ff24db3a333a23737652fd71d8709c 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
@@ -65,12 +65,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-    [-104, -211, -322]
-    [-102, -144, -248]]]]
-    """
+# expect output:
+# [[[[ -60, -142, -265]
+# [-104, -211, -322]
+# [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
index d0ef791b9dac3c62c828359e5af8dd9475029df5..63ef0289042fb524549947cc4f3caa1cd4ba2ab7 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
@@ -64,15 +64,13 @@ def test_conv2d_backprop_input():
     conv2d_input = Net()
     output = conv2d_input()
     print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-    [-15, -6, 17, 17, -2, -11]
-    [-15, -8, 13, 12, 2, -4]
-    [-13, -6, 8, -14, 5, 20]
-    [ -3, -4, -4, -19, 7, 23]
-    [ -3, -2, 0, -14, 3, 16]]]]
-    """
+# expect output:
+# [[[[ -5, -4, 5, 12, 0, -8]
+# [-15, -6, 17, 17, -2, -11]
+# [-15, -8, 13, 12, 2, -4]
+# [-13, -6, 8, -14, 5, 20]
+# [ -3, -4, -4, -19, 7, 23]
+# [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                          [-15, -6, 17, 17, -2, -11],
                          [-15, -8, 13, 12, 2, -4],
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
index d0c8a97d89fee0cdcb08ed166aee92b54602ec7c..0f890ea9987ce94c2ecb03ed73d5bb25c9accd77 100755
--- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
@@ -59,7 +59,7 @@ def gelu_backward_cmp(input_shape):

 class MEGeluLargeIn(Cell):
     def __init__(self):
-        super(GELU, self).__init__()
+        super(MEGeluLargeIn, self).__init__()
         self.matmul = P.MatMul()
         self.gelu = P.Gelu()

@@ -79,7 +79,7 @@ class GradLargeIn(Cell):


 def gelu_backward_me_large_in_impl(x1, x2, output_grad):
-    n = GradLargeIn()
+    n = MEGeluLargeIn()
     grad_with_sense = GradLargeIn(n)
     grad_with_sense.set_train()
     input_grad = grad_with_sense(x1, x2, output_grad)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less.py b/tests/st/ops/ascend/test_tbe_ops/test_less.py
index ccffaaf5f1257a1715f915f5e1ca59e0bc031a16..bc29054ae3ce625e480316741cd5ed9cd4a825fc 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_less.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_less.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less = P.Less()

     @ms_function
-    def construct(self, x1, x2):
-        return self.less(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less(x1_, x2_)


 x1 = np.random.randn(3, 4).astype(np.float16)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
index d6af031905f4ad6acb3112027fd37d48b4e63399..0bdd5cd955706b153aaec6bdee8e2def29646dd6 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.less_equal = P.LessEqual()

     @ms_function
-    def construct(self, x1, x2):
-        return self.less_equal(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.less_equal(x1_, x2_)


 x1 = np.random.randn(3, 4).astype(np.float16)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
index 7c7e5db1f185fca3148910aa9b0512d48003a7ad..19ea6ce7ac11f711553bc094c1bb4e5ce5597a27 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_and = P.LogicalAnd()

     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_and(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_and(x1_, x2_)


 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
index 333298a2e2b222b8bbca021513ab6d7844cf90c2..a530ec6216961687bea31a4da1cf2d5da3b6a633 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_not = P.LogicalNot()

     @ms_function
-    def construct(self, x1):
-        return self.logical_not(x1)
+    def construct(self, x):
+        return self.logical_not(x)


 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
index 5dfb8fd76581b5eb40b8f1397958eb5a5e427e32..f8fda645564a1591b3af9a2acf7b96ec16d2a456 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
@@ -28,8 +28,8 @@ class Net(nn.Cell):
         self.logical_or = P.LogicalOr()

     @ms_function
-    def construct(self, x1, x2):
-        return self.logical_or(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.logical_or(x1_, x2_)


 x1 = [True, True, False, False, True, True, False, False]
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
index 65f1c11060c2295dc936a0090ed6fca94362ad93..84d3e2f28d519dfcfdb9e9348c43ceb51a481bd9 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()

     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)


 x1 = np.random.randn(1, 3).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
index 80409ec7126ba70c2fe922c7544d7a54563915cf..0926a9882c630732ae437d006696da530fb8add6 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
@@ -29,8 +29,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul(transpose_b=True)

     @ms_function
-    def construct(self, x1, x2):
-        return self.matmul(x1, x2)
+    def construct(self, x1_, x2_):
+        return self.matmul(x1_, x2_)


 x1 = np.random.randn(10, 1).astype(np.float32)
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
index aad47e4aa0f617ba928b687a1241c86cf8607748..529343812eb0100ecd7d78c34d847bb2cbd6b672 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
@@ -44,15 +44,15 @@ class GradWrap(Cell):
         return gout


-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
     inputB_me = inputB_np
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)
-    if grad is None:
-        grad = np.random.randn(2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(2).astype(np.float32)
     print("----inputA---")
     print(inputA_np)
     print("----inputB---")
@@ -60,7 +60,7 @@ def gen_data(inputA_np, inputB_np, grad=None):

     net_me = GradWrap(MaxNetMe())
     net_me.set_train()
-    output = net_me(inputA_me, inputB_me, Tensor(grad))
+    output = net_me(inputA_me, inputB_me, Tensor(grad_))
     print("---me---")
     print(output[0].asnumpy())
     print(output[1].asnumpy())
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
index aafe43831086a9b0f470155e028c4332c46ecfb6..3a19aaa1d16503e386f32c0a63d5acfe37ab0120 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
@@ -44,7 +44,7 @@ class GradWrap(Cell):
         return gout


-def gen_data(inputA_np, inputB_np, grad=None):
+def gen_data(inputA_np, inputB_np, grad_=None):
     inputA_me = inputA_np
     if isinstance(inputA_np, np.ndarray):
         inputA_me = Tensor(inputA_me)
@@ -53,12 +53,12 @@ def gen_data(inputA_np, inputB_np, grad=None):
     if isinstance(inputB_np, np.ndarray):
         inputB_me = Tensor(inputB_np)

-    if grad is None:
-        grad = np.random.randn(1, 3, 2, 2).astype(np.float32)
+    if grad_ is None:
+        grad_ = np.random.randn(1, 3, 2, 2).astype(np.float32)

     print(inputA_np)
     print(inputB_np)
-    print(grad)
+    print(grad_)

     net_me = GradWrap(MinNetMe())
     net_me.set_train()
diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
index b26fe30697d33633bc80e9010bda00135cd3e690..40dc5ebadaefca1696816a69831ef37da7334484 100644
--- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
+++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
@@ -31,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, inputValue, output_grad):
+        return self.grad(self.network)(inputValue, output_grad)


 class Net(nn.Cell):
diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py
index d680ac0a55777b14ab9e9b1d5d746922a1da03c2..1cac1004fd0ae5bd2fbc5036901899068ce622ee 100644
--- a/tests/st/ops/ascend/test_tdt_data_ms.py
+++ b/tests/st/ops/ascend/test_tdt_data_ms.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import sys
+import numpy as np

 import mindspore.context as context
 import mindspore.dataset as ds
@@ -31,8 +31,8 @@ SCHEMA_DIR = "{0}/resnet_all_datasetSchema.json".format(data_path)

 def test_me_de_train_dataset():
     data_list = ["{0}/train-00001-of-01024.data".format(data_path)]
-    data_set = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
-                                  columns_list=["image/encoded", "image/class/label"])
+    data_set_new = ds.TFRecordDataset(data_list, schema=SCHEMA_DIR,
+                                      columns_list=["image/encoded", "image/class/label"])

     resize_height = 224
     resize_width = 224
@@ -42,21 +42,21 @@ def test_me_de_train_dataset():

     # define map operations
     decode_op = vision.Decode()
-    resize_op = vision.Resize(resize_height, resize_width,
+    resize_op = vision.Resize((resize_height, resize_width),
                               Inter.LINEAR)  # Bilinear as default
     rescale_op = vision.Rescale(rescale, shift)

     # apply map operations on images
-    data_set = data_set.map(input_columns="image/encoded", operations=decode_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=resize_op)
-    data_set = data_set.map(input_columns="image/encoded", operations=rescale_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=decode_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=resize_op)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=rescale_op)

     hwc2chw_op = vision.HWC2CHW()
-    data_set = data_set.map(input_columns="image/encoded", operations=hwc2chw_op)
-    data_set = data_set.repeat(1)
+    data_set_new = data_set_new.map(input_columns="image/encoded", operations=hwc2chw_op)
+    data_set_new = data_set_new.repeat(1)

     # apply batch operations
-    batch_size = 32
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-    return data_set
+    batch_size_new = 32
+    data_set_new = data_set_new.batch(batch_size_new, drop_remainder=True)
+    return data_set_new


 def convert_type(shapes, types):
diff --git a/tests/st/ops/cpu/test_concat_op.py b/tests/st/ops/cpu/test_concat_op.py
index 9d5067a35d6019779344e60f5abce8baab7a9695..c2a1d07853c873666b0f4e51dc2542235d0f3baa 100644
--- a/tests/st/ops/cpu/test_concat_op.py
+++ b/tests/st/ops/cpu/test_concat_op.py
@@ -14,10 +14,10 @@

 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
diff --git a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
index a422468b2e13c78eb8993428e37d186c7361d925..87a0f735a209ff62fff19a3fc95c012489d602e3 100644
--- a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
+++ b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py
@@ -68,12 +68,10 @@ def test_conv2d_backprop_filter():
     conv2d_filter = Net4()
     output = conv2d_filter()
     print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-    [-104, -211, -322]
-    [-102, -144, -248]]]]
-    """
+# expect output:
+# [[[[ -60, -142, -265]
+# [-104, -211, -322]
+# [-102, -144, -248]]]]
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)
diff --git a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
index a75e676507f6526e02ba0c8c9e195009b2fb6be8..7945f3828fe07229c6941e723aaec6a47ac13b14 100644
--- a/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
+++ b/tests/st/ops/cpu/test_conv2d_backprop_input_op.py
@@ -66,16 +66,14 @@ class Net5(nn.Cell):
 def test_conv2d_backprop_input():
     conv2d_input = Net5()
     output = conv2d_input()
-    print("================================")
-    """
-    expect output:
-    [[[[ -5, -4, 5, 12, 0, -8]
-    [-15, -6, 17, 17, -2, -11]
-    [-15, -8, 13, 12, 2, -4]
-    [-13, -6, 8, -14, 5, 20]
-    [ -3, -4, -4, -19, 7, 23]
-    [ -3, -2, 0, -14, 3, 16]]]]
-    """
+    print("================================")
+# expect output:
+# [[[[ -5, -4, 5, 12, 0, -8]
+# [-15, -6, 17, 17, -2, -11]
+# [-15, -8, 13, 12, 2, -4]
+# [-13, -6, 8, -14, 5, 20]
+# [ -3, -4, -4, -19, 7, 23]
+# [ -3, -2, 0, -14, 3, 16]]]]
     expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                          [-15, -6, 17, 17, -2, -11],
                          [-15, -8, 13, 12, 2, -4],
diff --git a/tests/st/ops/cpu/test_conv2d_op.py b/tests/st/ops/cpu/test_conv2d_op.py
index 627a722b5b844dfd4c8a1db4f85bc8b7b3938111..454f32eac7ee7389277acd45525a5206599d13d4 100644
--- a/tests/st/ops/cpu/test_conv2d_op.py
+++ b/tests/st/ops/cpu/test_conv2d_op.py
@@ -55,16 +55,13 @@ def test_conv2d():
     conv2d = NetConv2d()
     output = conv2d()
     print("================================")
-    """
-    expect output:
-    [[[[ 45.  48.  51.]
-    [ 54.  57.  60.]
-    [ 63.  66.  69.]]
-
-    [[126. 138. 150.]
-    [162. 174. 186.]
-    [198. 210. 222.]]]]
-    """
+# expect output:
+# [[[[ 45.  48.  51.]
+# [ 54.  57.  60.]
+# [ 63.  66.  69.]]
+# [[126. 138. 150.]
+# [162. 174. 186.]
+# [198. 210. 222.]]]]
     expect = np.array([[[[45, 48, 51],
                          [54, 57, 60],
                          [63, 66, 69]],
diff --git a/tests/st/ops/cpu/test_gather_op.py b/tests/st/ops/cpu/test_gather_op.py
index 50fb2096dd575d6d28c953790a71210449ce0c97..b9ac24ad3582752227bdcef9d21706aeabcc316d 100644
--- a/tests/st/ops/cpu/test_gather_op.py
+++ b/tests/st/ops/cpu/test_gather_op.py
@@ -14,11 +14,10 @@

 import pytest
+import numpy as np
 from mindspore import Tensor
 from mindspore.ops import operations as P
 import mindspore.nn as nn
-from mindspore.common.api import ms_function
-import numpy as np
 import mindspore.context as context
 from mindspore.common import dtype as mstype
@@ -96,7 +95,7 @@ def test_gatherv2_axisN1():
     expect = np.array([[[1., 2.],
                         [4., 5.]],
                        [[7., 8.],
-                        [10.,11.]]])
+                        [10., 11.]]])
     error = np.ones(shape=ms_output.asnumpy().shape) * 1.0e-6
     diff = ms_output.asnumpy() - expect
     assert np.all(diff < error)
diff --git a/tests/st/ops/cpu/test_momentum_op.py b/tests/st/ops/cpu/test_momentum_op.py
index 43ba785aed2ae5bf695c834c90fbb073d4f2d983..717925c23e61a7809cfe6239b5b52b290e89ec9f 100644
--- a/tests/st/ops/cpu/test_momentum_op.py
+++ b/tests/st/ops/cpu/test_momentum_op.py
@@ -65,10 +65,8 @@ def test_momentum():

     print("================================")
     print(losses)
-    """
-    expect output:
-    [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
-      0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
-    """
+# expect output:
+# [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
+#   0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]

     return losses
diff --git a/tests/st/ops/cpu/test_slice_op.py b/tests/st/ops/cpu/test_slice_op.py
index 0f0aa53d04bb106ceadb4193c605b40e2a82b3e9..90c777ef502a01f7b70816e3f9d0218198cc047e 100644
--- a/tests/st/ops/cpu/test_slice_op.py
+++ b/tests/st/ops/cpu/test_slice_op.py
@@ -41,8 +41,8 @@ def test_slice():
     expect = [[[2., -2., 2.]],
               [[4., -4., 4.]]]

-    slice = Slice()
-    output = slice(x)
+    slice_op = Slice()
+    output = slice_op(x)
     print("output:\n", output)
     assert (output.asnumpy() == expect).all()
diff --git a/tests/st/ops/custom_ops_tbe/add3_impl.py b/tests/st/ops/custom_ops_tbe/add3_impl.py
index f169ff40d7a3d8f66928cdb491fae56b9dde55e9..36f296d4c1e679541dd47747d8086bbf7cd844b9 100644
--- a/tests/st/ops/custom_ops_tbe/add3_impl.py
+++ b/tests/st/ops/custom_ops_tbe/add3_impl.py
@@ -13,17 +13,17 @@
 # limitations under the License.
 # ============================================================================
 from __future__ import absolute_import
+import te.lang.cce
 from te import tvm
+from te.platform.fusion_manager import fusion_manager
 from topi import generic
-import te.lang.cce
 from topi.cce import util
-from te.platform.fusion_manager import fusion_manager
 from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType


 @fusion_manager.register("add3")
 def add3_compute(input1, input2, const_bias):
     sum2 = te.lang.cce.vadd(input1, input2)
-    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype = input1.dtype))
+    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype=input1.dtype))
     return sum3
@@ -44,7 +44,7 @@ cus_add3_op_info = TBERegOp("CusAdd3") \


 @op_info_register(cus_add3_op_info)
-def CusAdd3Impl(input1, inptu2, sum, const_bias, kernel_name="CusAdd3Impl"):
+def CusAdd3Impl(input1, inptu2, sum1, const_bias, kernel_name="CusAdd3Impl"):
     shape = input1.get("shape")
     shape = util.shape_refine(shape)
     dtype = input1.get("dtype").lower()
diff --git a/tests/st/ops/custom_ops_tbe/cus_add3.py b/tests/st/ops/custom_ops_tbe/cus_add3.py
index ae0c4d32052b78adc00c2213776a10e844000299..a534be3eae4cd5d0ffa7788ae82d0b6eef82680c 100644
--- a/tests/st/ops/custom_ops_tbe/cus_add3.py
+++ b/tests/st/ops/custom_ops_tbe/cus_add3.py
@@ -12,10 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P
-from mindspore import Tensor

 # sum = input1 + input2 + const_bias
 class CusAdd3(PrimitiveWithInfer):
diff --git a/tests/st/ops/custom_ops_tbe/cus_square.py b/tests/st/ops/custom_ops_tbe/cus_square.py
index 59ba7988699a47b414bb23b7deda5bb3aa28fe44..be43d2f5deb10b410ff4516eed638ee14b639d23 100644
--- a/tests/st/ops/custom_ops_tbe/cus_square.py
+++ b/tests/st/ops/custom_ops_tbe/cus_square.py
@@ -15,7 +15,6 @@
 import numpy as np
 from mindspore import Tensor
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
-from mindspore.ops import operations as P

 # y = x^2
 class CusSquare(PrimitiveWithInfer):
@@ -36,10 +35,10 @@ class CusSquare(PrimitiveWithInfer):

     def infer_dtype(self, data_dtype):
         return data_dtype
-    
+
     def get_bprop(self):
         def bprop(data, out, dout):
             gradient = data * 2
             dx = gradient * dout
-            return (dx, )
+            return (dx,)
         return bprop
diff --git a/tests/st/ops/gpu/test_select_op.py b/tests/st/ops/gpu/test_select_op.py
index 03c100cab96a16e7f3dfd96df4cf181681d53628..1b1ccb7ef5c0f810e2541f0f5156bdb229a5b497 100644
--- a/tests/st/ops/gpu/test_select_op.py
+++ b/tests/st/ops/gpu/test_select_op.py
@@ -27,8 +27,8 @@ class Net(nn.Cell):
         super(Net, self).__init__()
         self.select = P.Select()

-    def construct(self, cond, input_x, input_y):
-        return self.select(cond, input_x, input_y)
+    def construct(self, cond_op, input_x, input_y):
+        return self.select(cond_op, input_x, input_y)


 cond = np.array([[True, False], [True, False]]).astype(np.bool)
diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py
index 5e5fa7deb2136453ff59071363295369285aaa12..bf1d8b72d39b89a2dfc2a773a9e9ce65bc6bb97f 100644
--- a/tests/ut/python/ops/test_array_ops.py
+++ b/tests/ut/python/ops/test_array_ops.py
@@ -315,16 +315,16 @@ test_case_array_ops = [
         'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
     ('SpaceToDepthNet', {
         'block': SpaceToDepthNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,3,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
     ('DepthToSpaceNet', {
         'block': DepthToSpaceNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,12,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
     ('SpaceToBatchNDNet', {
         'block': SpaceToBatchNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(1,1,2,2).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
     ('BatchToSpaceNDNet', {
         'block': BatchToSpaceNDNet(),
-        'desc_inputs': [Tensor(np.random.rand(4,1,1,1).astype(np.float16))]}),
+        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
 ]

 test_case_lists = [test_case_array_ops]
diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py
index 1c4ab8c76d852b8830f3dc08e2442cbf7521e856..5223899256bf2a49576eb32bb6063fd6c41a1787 100755
--- a/tests/ut/python/ops/test_math_ops_check.py
+++ b/tests/ut/python/ops/test_math_ops_check.py
@@ -26,7 +26,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \


 class AssignAddNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignAddNet, self).__init__()
         self.op = P.AssignAdd()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1")
@@ -37,7 +37,7 @@ class AssignAddNet(nn.Cell):


 class AssignSubNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(AssignSubNet, self).__init__()
         self.op = P.AssignSub()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1")
diff --git a/tests/ut/python/ops/test_multitype_ops.py b/tests/ut/python/ops/test_multitype_ops.py
index 54997486e990f0f13b9f7f9d07feb23c694975db..33be1adac639c610f3644607c65a31e582275fa3 100644
--- a/tests/ut/python/ops/test_multitype_ops.py
+++ b/tests/ut/python/ops/test_multitype_ops.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 # ============================================================================
 """multitype_ops directory test case"""
-import numpy as np
 from functools import partial, reduce
+import numpy as np

 import mindspore.nn as nn
 import mindspore.context as context
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 1d93e3c1bd3fd43fb7a6a4cb550d3ef0417c7580..6a04f9e671f219257a1e164ebab1c9111c7225c7 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -231,7 +231,7 @@ class ApplyRMSNet(nn.Cell):
         self.apply_rms = P.ApplyRMSProp()
         self.lr = 0.001
         self.rho = 0.0
-        self.momentum= 0.0
+        self.momentum = 0.0
         self.epsilon = 1e-10
         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
         self.ms = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="ms")
@@ -574,7 +574,8 @@ test_case_math_ops = [
     ('CumSum', {
         'block': CumSumNet(),
         'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
-        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}),
+        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7],
+                                        [1, 3, 7, 9]]).astype(np.float32))]}),
     ('ReduceSum_3', {
         'block': P.ReduceSum(),
         'desc_const': [0],
diff --git a/tests/ut/python/ops/test_ops_reid.py b/tests/ut/python/ops/test_ops_reid.py
index 741f631ab6f1764ad00e9ae35bf747a033475c9e..b3b3e1d4701c7266b65e566304a3a48927c8a2f9 100644
--- a/tests/ut/python/ops/test_ops_reid.py
+++ b/tests/ut/python/ops/test_ops_reid.py
@@ -103,7 +103,7 @@ test_case_reid_ops = [
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('PRelu', {
         'block': P.PReLU(),
-        'desc_inputs': [[128, 64, 112, 112], [64, ]],
+        'desc_inputs': [[128, 64, 112, 112], [64,]],
         'desc_bprop': [[128, 64, 112, 112]]}),
     ('Cos', {
         'block': P.Cos(),
@@ -155,11 +155,11 @@ test_case = functools.reduce(lambda x, y: x + y, test_case_lists)


 test_exec_case = filter(lambda x: 'skip' not in x[1] or
-                        'exec' not in x[1]['skip'], test_case)
+                                  'exec' not in x[1]['skip'], test_case)


 test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or
-                                 'backward' not in x[1]['skip'] and 'backward_exec'
-                                 not in x[1]['skip'], test_case)
+                                           'backward' not in x[1]['skip'] and 'backward_exec'
+                                           not in x[1]['skip'], test_case)


 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)