diff --git a/python/paddle/fluid/tests/unittests/npu/CMakeLists.txt b/python/paddle/fluid/tests/unittests/npu/CMakeLists.txt index f71e04c09aa38b8cf7b3a167b84d4dc0e6cc3ec7..4ab9262f248a21d454f9d32520fe084553cadfe7 100644 --- a/python/paddle/fluid/tests/unittests/npu/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/npu/CMakeLists.txt @@ -1,6 +1,8 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") -foreach(TEST_OP ${TEST_OPS}) - py_test_modules(${TEST_OP} MODULES ${TEST_OP}) -endforeach(TEST_OP) +if (WITH_ASCEND_CL) + foreach(TEST_OP ${TEST_OPS}) + py_test_modules(${TEST_OP} MODULES ${TEST_OP}) + endforeach(TEST_OP) +endif() diff --git a/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py index 9382cf2162ef2598192e0a8e0f1bd630cbb9a6a4..3c16a24b33191d4fc6942aeaceb4dfc8d04a5e48 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py @@ -25,8 +25,6 @@ import paddle.fluid as fluid paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUAbs(OpTest): def setUp(self): self.op_type = "abs" diff --git a/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py index 5aeca5abd9f8315e2793e6cd5ba53a283c850b85..0f55c8b5914870e775e5e78783d7fcdf3b6fc07b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAccuracy(OpTest): def setUp(self): self.op_type = "accuracy" @@ -60,7 +58,7 @@ class TestAccuracy(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestAccuracy2(TestAccuracy): diff --git a/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py index 17dad036185d844ecb3ebcc154d78b349bab33d0..02d4002f72c492bf7ec05be9539e8e5f8bedb5a0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py @@ -25,8 +25,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAdam(OpTest): def setUp(self): self.set_npu() @@ -75,11 +73,9 @@ class TestAdam(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAdamWithEpsilonTensor(OpTest): def setUp(self): self.set_npu() @@ -131,11 +127,9 @@ class TestAdamWithEpsilonTensor(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAdamOpWithSkipUpdate(OpTest): def setUp(self): self.set_npu() @@ -185,11 +179,9 @@ class TestAdamOpWithSkipUpdate(OpTest): self.dtype = 
np.float32 def test_check_output(self): - self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAdamOpWithGlobalBetaPow(OpTest): def setUp(self): self.set_npu() @@ -244,11 +236,9 @@ class TestAdamOpWithGlobalBetaPow(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() @@ -309,8 +299,6 @@ class TestNet(unittest.TestCase): self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNetWithEpsilonTensor(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py index e92bfbb4d77d33aece688655a19847bc372f39ca..604eb32db0a6c939f8d2b1e01a5f6df1ac28e7b0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py @@ -25,8 +25,6 @@ from paddle.fluid.contrib.mixed_precision.amp_nn import check_finite_and_unscale paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCheckFiniteAndUnscale(unittest.TestCase): def get_prog(self): paddle.enable_static() @@ -39,11 +37,11 @@ class TestCheckFiniteAndUnscale(unittest.TestCase): name="status", shape=[8], dtype='float32') main_program.global_block().append_op( type="alloc_float_status", - outputs={"FloatStatus": float_status}, ) + outputs={"FloatStatus": float_status}) main_program.global_block().append_op( type="clear_float_status", inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}, ) + outputs={"FloatStatusOut": float_status}) c = paddle.fluid.layers.elementwise_div(a, b) out, found_inf = check_finite_and_unscale( [c], scale, float_status=float_status) @@ -95,8 +93,6 @@ class TestCheckFiniteAndUnscale(unittest.TestCase): self.assertFalse(found_inf[0]) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase): def get_prog(self): paddle.enable_static() @@ -109,21 +105,21 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase): name="status", shape=[8], dtype='float32') main_program.global_block().append_op( type="alloc_float_status", - outputs={"FloatStatus": float_status}, ) + outputs={"FloatStatus": float_status}) main_program.global_block().append_op( type="clear_float_status", inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}, ) + outputs={"FloatStatusOut": float_status}) c = paddle.fluid.layers.elementwise_div(a, b) out, found_inf = check_finite_and_unscale( [c], scale, float_status=float_status) main_program.global_block().append_op( type="alloc_float_status", - outputs={"FloatStatus": float_status}, ) + outputs={"FloatStatus": float_status}) main_program.global_block().append_op( type="clear_float_status", 
inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}, ) + outputs={"FloatStatusOut": float_status}) d = paddle.fluid.layers.elementwise_add(a, b) out, found_inf = check_finite_and_unscale( [d], scale, float_status=float_status) diff --git a/python/paddle/fluid/tests/unittests/test_assign_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py similarity index 88% rename from python/paddle/fluid/tests/unittests/test_assign_op_npu.py rename to python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py index ed21549b7e01fddcf30c8a53f1a96081cb3bb8f0..14133d5a385ff84536ab414cd0388ab4edabb722 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAssign(OpTest): def setUp(self): self.set_npu() @@ -49,7 +47,7 @@ class TestAssign(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py index eb00e777fe0eef3811c9749d477677ce794d095d..16db952533437c699fb7993350f280a03ccbe76a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py @@ -147,8 +147,6 @@ def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): return expected -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestBCELoss(unittest.TestCase): def test_BCELoss(self): input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32) @@ -220,8 +218,6 @@ def bce_loss(input, label): return -1 * (label * np.log(input) + (1. - label) * np.log(1. 
- input)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestBceLossOp(OpTest): def setUp(self): self.set_npu() @@ -248,15 +244,11 @@ class TestBceLossOp(OpTest): self.shape = [10, 10] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestBceLossOpCase1(OpTest): def init_test_cast(self): self.shape = [2, 3, 4, 5] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestBceLossOpCase2(OpTest): def init_test_cast(self): self.shape = [2, 3, 20] diff --git a/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py index ae48866b7b969d5e7f2d7bf0dc9ed93c46aed4bb..f522eb10d92e6b7e377f072cc153f8b275a053e5 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCast1(OpTest): def setUp(self): self.set_npu() @@ -48,7 +46,7 @@ class TestCast1(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestCast2(OpTest): @@ -70,7 +68,7 @@ class TestCast2(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) class TestCast3(OpTest): @@ -92,7 +90,7 @@ class TestCast3(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py index 37fa5f8cad2abee06438f2d27da5e27ff5bbf963..f1bbf0becf1950360735a60838257c2084a53416 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py @@ -28,8 +28,6 @@ SEED = 2021 alignment = 512 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAllocContinuousSpace(OpTest): def setUp(self): self.__class__.use_npu = True @@ -82,12 +80,9 @@ class TestAllocContinuousSpace(OpTest): self.check_output_with_place( place=paddle.NPUPlace(0), no_check_set=["FusedOutput"], - atol=1e-5, - check_dygraph=False) + atol=1e-5, ) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAllocContinuousSpace2(TestAllocContinuousSpace): def init_attr(self): return { @@ -102,8 +97,7 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace): self.check_output_with_place( place=paddle.NPUPlace(0), no_check_set=["FusedOutput"], - atol=1e-5, - check_dygraph=False) + atol=1e-5, ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py index 54a2c1e7163a9f122927bc2781eb1a13a84a124e..909bfaaa07fd5894eda6a8a8998d0fd897724413 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 
2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestEqual(OpTest): def setUp(self): self.set_npu() @@ -53,11 +51,9 @@ class TestEqual(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLessthan(OpTest): def setUp(self): self.set_npu() @@ -83,7 +79,7 @@ class TestLessthan(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestEqual2(TestEqual): diff --git a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py index a2ec1c7a9eef6ebc516cdc94062dd628c77a1f81..8f11d00ccabf67df24073755da2b744467b22825 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestConcat(OpTest): def setUp(self): self.set_npu() @@ -56,7 +54,7 @@ class TestConcat(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def init_test_data(self): self.x0 = np.random.random((1, 4, 50)).astype(self.dtype) @@ -65,12 +63,9 @@ class TestConcat(OpTest): self.axis = 0 def test_check_grad(self): - self.check_grad_with_place( - self.place, ['x0', 'x2'], 'Out', check_dygraph=False) - self.check_grad_with_place( - self.place, ['x1'], 'Out', check_dygraph=False) - self.check_grad_with_place( - self.place, ['x2'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['x0', 'x2'], 'Out') + self.check_grad_with_place(self.place, ['x1'], 'Out') + self.check_grad_with_place(self.place, ['x2'], 'Out') class TestConcatFP16(OpTest): @@ -102,7 +97,7 @@ class TestConcatFP16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def init_test_data(self): self.x0 = np.random.random((1, 4, 50)).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py index 6b936514452f74df2a1692538ddb0d4128365fc2..4c434561e1cc11a84bef6a2f8c07ed192d028452 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py @@ -28,8 +28,6 @@ SEED = 2021 EPOCH = 100 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOp(OpTest): def setUp(self): self.op_type = "dropout" @@ -55,17 +53,14 @@ class TestDropoutOp(OpTest): self.place = paddle.NPUPlace(0) def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad_normal(self): if self.dtype == np.float16: return - self.check_grad_with_place( - self.place, ['X'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['X'], 'Out') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOpInput1d(TestDropoutOp): # 
change input shape def setUp(self): @@ -85,15 +80,13 @@ class TestDropoutOpInput1d(TestDropoutOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOpInput1d(TestDropoutOp): # the input is 1-D def setUp(self): self.op_type = "dropout" self.set_npu() self.init_dtype() self.inputs = {'X': np.random.random((2000, )).astype(self.dtype)} self.attrs = { 'dropout_prob': 0.0, 'fix_seed': True, @@ -106,8 +99,6 @@ class TestDropoutOpInput1d(TestDropoutOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOp2(TestDropoutOp): # the dropout_prob is 1.0 def setUp(self): @@ -127,8 +118,6 @@ class TestDropoutOp2(TestDropoutOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOp3(TestDropoutOp): # the input dim is 3 def setUp(self): @@ -148,8 +137,6 @@ class TestDropoutOp3(TestDropoutOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOpInference(OpTest): # is_test = True @@ -174,11 +161,9 @@ class TestDropoutOpInference(OpTest): self.place = paddle.NPUPlace(0) def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOpInference2(TestDropoutOpInference): def setUp(self): @@ -194,8 +179,6 @@ class TestDropoutOpInference2(TestDropoutOpInference): self.outputs = {'Out': self.inputs['X']} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOpWithSeed(TestDropoutOp): # the seed is a Tensor def setUp(self): @@ -218,8 +201,6 @@ class TestDropoutOpWithSeed(TestDropoutOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutOpFp16(TestDropoutOp): # float16 def init_dtype(self): @@ -231,8 +212,6 @@ class TestDropoutOpFp16(TestDropoutOp): self.place = paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestDropoutAPI(unittest.TestCase): def setUp(self): np.random.seed(123) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py index 6a82157faaec41d9abaffa9b68e3a3e80b6b2fb3..5288db5ceb1c6fe33af912ed4408352997876943 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py @@ -25,8 +25,6 @@ import paddle.fluid as fluid paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseAddOp(OpTest): def setUp(self): self.set_npu() @@ -62,34 +60,32 @@ class TestElementwiseAddOp(OpTest): self.axis = -1 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad_normal(self): self.check_grad_with_place( - self.place, ['X', 'Y'], + self.place, + ['X', 'Y'], 'Out', - max_relative_error=0.006, - check_dygraph=False) + max_relative_error=0.006, ) def
test_check_grad_ingore_x(self): self.check_grad_with_place( - self.place, ['Y'], + self.place, + ['Y'], 'Out', no_grad_set=set("X"), - max_relative_error=0.006, - check_dygraph=False) + max_relative_error=0.006, ) def test_check_grad_ingore_y(self): self.check_grad_with_place( - self.place, ['X'], + self.place, + ['X'], 'Out', no_grad_set=set("Y"), - max_relative_error=0.006, - check_dygraph=False) + max_relative_error=0.006, ) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAddAPI(unittest.TestCase): def test_name(self): with paddle.static.program_guard(paddle.static.Program()): @@ -134,8 +130,6 @@ class TestAddAPI(unittest.TestCase): msg="z_value = {}, but expected {}".format(z_value, z_expected)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAddError(unittest.TestCase): def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py index 0ae2678d10b47c8998882e3ee00d177e86236a06..ee72ee74065e3b1dd417a5d3adac8711a2464cba 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseDiv(OpTest): def setUp(self): self.set_npu() @@ -54,30 +52,28 @@ class TestElementwiseDiv(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad_normal(self): self.check_grad_with_place( - self.place, ['X', 'Y'], + self.place, + ['X', 'Y'], 'Out', - max_relative_error=0.007, - check_dygraph=False) + max_relative_error=0.007, ) def test_check_grad_ingore_x(self): self.check_grad_with_place( - self.place, ['Y'], + self.place, + ['Y'], 'Out', max_relative_error=0.007, - no_grad_set=set("X"), - check_dygraph=False) + no_grad_set=set("X"), ) def test_check_grad_ingore_y(self): self.check_grad_with_place( - self.place, ['X'], 'Out', no_grad_set=set("Y"), check_dygraph=False) + self.place, ['X'], 'Out', no_grad_set=set("Y")) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseDivFp16(OpTest): def setUp(self): self.set_npu() @@ -105,11 +101,9 @@ class TestElementwiseDivFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseDivNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py index 93538e938670f07ab78f33e0c9749b702854b7d6..36d282a3d06f77369024d5c08e3f5d467d0a76d4 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py @@ -24,8 +24,6 @@ import paddle paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not 
compiled with NPU") class TestElementwiseFloorDiv(OpTest): def setUp(self): self.op_type = "elementwise_floordiv" @@ -53,11 +51,9 @@ class TestElementwiseFloorDiv(OpTest): self.dtype = "int64" def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseFloorDiv2(TestElementwiseFloorDiv): def init_dtype(self): self.dtype = "int32" diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py similarity index 91% rename from python/paddle/fluid/tests/unittests/test_elementwise_max_op_npu.py rename to python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py index 6475caf970cba7be5efad60b9a4c094e112175c3..6c325b020206c4e6c96c83a318edbebf79a3768f 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMax(OpTest): def setUp(self): self.set_npu() @@ -54,7 +52,7 @@ class TestElementwiseMax(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Max grad test # def test_check_grad(self): @@ -64,8 +62,6 @@ class TestElementwiseMax(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMaxFp16(OpTest): def setUp(self): self.set_npu() @@ -93,11 +89,9 @@ class TestElementwiseMaxFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMaxNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py index b4d9c7285b2b556f76f94241cc0b9373319f7753..2034a12c5c0feb92e351889c3ca7ec25c0b29611 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMin(OpTest): def setUp(self): self.set_npu() @@ -54,7 +52,7 @@ class TestElementwiseMin(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Min grad test # def test_check_grad(self): @@ -64,8 +62,6 @@ class TestElementwiseMin(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMinFp16(OpTest): def setUp(self): self.set_npu() @@ -93,11 +89,9 @@ class TestElementwiseMinFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + 
self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMinNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py index 9bfb7e033e7ea454223c683877bb30f02506be75..ea94661e8a51e680b23e6bb095caa813c29ab74f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMul(OpTest): def setUp(self): self.set_npu() @@ -54,7 +52,7 @@ class TestElementwiseMul(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Mul grad test # def test_check_grad(self): @@ -64,8 +62,6 @@ class TestElementwiseMul(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMulFp16(OpTest): def setUp(self): self.set_npu() @@ -93,11 +89,9 @@ class TestElementwiseMulFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseMulNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py index 862c546b8e05ebe2046e9c5aeb52178fa47f59ab..dea1828a6d75fca3e6a871207e8a746305169a6c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwisePow(OpTest): def setUp(self): self.set_npu() @@ -54,7 +52,7 @@ class TestElementwisePow(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Pow grad test # def test_check_grad(self): @@ -64,8 +62,6 @@ class TestElementwisePow(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwisePowFp16(OpTest): def setUp(self): self.set_npu() @@ -93,11 +89,9 @@ class TestElementwisePowFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwisePowNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py index 
8c6c7b46f49f2725b646202998095adef3a65e63..6faa77b4602137ad0e870fa8b7bd213e50c49022 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestElementwiseSubOp(OpTest): def setUp(self): self.set_npu() @@ -64,7 +62,7 @@ class TestElementwiseSubOp(OpTest): self.axis = 0 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): For grad tests, OpTest raises FatalError:Segmentation fault # when call op.run, which may be caused by system environment exception @@ -74,7 +72,7 @@ class TestElementwiseSubOp(OpTest): # self.place, ['X', 'Y'], # 'Out', # max_relative_error=0.006, - # check_dygraph=False) + # ) # # def test_check_grad_ingore_x(self): # self.check_grad_with_place( @@ -82,18 +80,16 @@ class TestElementwiseSubOp(OpTest): # 'Out', # no_grad_set=set("X"), # max_relative_error=0.006, - # check_dygraph=False) + # ) # # def test_check_grad_ingore_y(self): # self.check_grad_with_place( # self.place, ['X'], # 'Out', # no_grad_set=set("Y"), - # max_relative_error=0.006,check_dygraph=False) + # max_relative_error=0.006,) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSubtractAPI(unittest.TestCase): def test_name(self): with paddle.static.program_guard(paddle.static.Program()): @@ -138,8 +134,6 @@ class TestSubtractAPI(unittest.TestCase): msg="z_value = {}, but expected {}".format(z_value, z_expected)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSubtractError(unittest.TestCase): def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): @@ -158,8 +152,6 @@ class TestSubtractError(unittest.TestCase): self.assertRaises(TypeError, paddle.subtract, x2, y2) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSubtractNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py index f6a84d3be5c100c7324f2ed62ce8c934f4318b7f..375003f79e500f99ceaf374ce898d998263700e0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestExpand(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestExpand(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -60,8 +58,6 @@ class TestExpand(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestExpandV2(TestExpand): def setUp(self): self.set_npu() @@ -82,8 +78,6 @@ class TestExpandV2(TestExpand): self.outputs = {'Out': out} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestExpandFp16(TestExpand): no_need_check_grad = True @@ -91,8 +85,6 @@ class TestExpandFp16(TestExpand): 
self.dtype = np.float16 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestExpandNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py index 6e619bfd11fb901994ad3a91187a716b014dab41..c8d7f2f9dc9873739075ff5808a856cd3e8a6c0a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestFillConstant(OpTest): def setUp(self): self.set_npu() @@ -47,7 +45,7 @@ class TestFillConstant(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestFillConstantInt(OpTest): @@ -71,7 +69,7 @@ class TestFillConstantInt(OpTest): self.dtype = np.int32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestFillConstantFP16(OpTest): @@ -95,7 +93,7 @@ class TestFillConstantFP16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py index 008422ffd21188327fa938734928a0dc62187824..daca3d884600a09da244d4ec2952b2028c7e9762 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py @@ -34,8 +34,6 @@ def gather_numpy(x, index, axis): return gather -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestGatherOp(OpTest): def setUp(self): self.set_npu() @@ -53,14 +51,14 @@ class TestGatherOp(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad(self): self.check_grad_with_place( - self.place, ['X'], + self.place, + ['X'], 'Out', - max_relative_error=0.006, - check_dygraph=False) + max_relative_error=0.006, ) def config(self): """ @@ -72,8 +70,6 @@ class TestGatherOp(OpTest): self.index_type = "int32" -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCase1(TestGatherOp): def config(self): """ @@ -85,8 +81,6 @@ class TestCase1(TestGatherOp): self.index_type = "int32" -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class API_TestGather(unittest.TestCase): def test_out1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -120,8 +114,6 @@ class API_TestGather(unittest.TestCase): self.assertTrue(np.allclose(result, expected_output)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestGatherGrad(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py 
index 07e214e06003f3fa0bdca0c73911124126e347d2..11f64b8fc7d269b908c2cec026b2d3abfb3c48a8 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py @@ -26,8 +26,6 @@ from test_gaussian_random_op import TestGaussianRandomOp paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUGaussianRandomOp(OpTest): def setUp(self): self.set_npu() diff --git a/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py index 4127c7382880e261ce4a376ccc0660e486fdc699..760ce59812ea2e9516d3f4da39938c4b59eee838 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py @@ -32,8 +32,6 @@ def np_gelu(x): return y -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestGelu(OpTest): def setUp(self): self.set_npu() @@ -56,18 +54,13 @@ class TestGelu(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) def test_check_grad(self): self.check_grad_with_place( - self.place, ['X'], - 'Out', - check_dygraph=False, - max_relative_error=0.007) + self.place, ['X'], 'Out', max_relative_error=0.007) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestGeluFp16(OpTest): def setUp(self): self.set_npu() @@ -91,11 +84,9 @@ class TestGeluFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestGeluNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py index 3e2e8f944b84c6f2c97b7d834e6f2b6b7511583d..dfb9b26d64ea852614bba1a30bf9b9555fb110e6 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py @@ -29,8 +29,6 @@ SEED = 2021 NPUPlace = 0 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestIncrement(OpTest): def setUp(self): self.set_npu() @@ -54,11 +52,9 @@ class TestIncrement(OpTest): self.dtype = np.int64 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestIncrementFP16(OpTest): def setUp(self): self.set_npu() @@ -82,11 +78,9 @@ class TestIncrementFP16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestIncrementInplace(unittest.TestCase): def test_npu(self): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py index 
d447dfb8d4d031e6f29fdfedab285066d4dea565..0345ac1f2065b1b6dfa769dd7e6fd64c7790df6f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py @@ -36,8 +36,6 @@ from op_test import _set_use_system_allocator _set_use_system_allocator(False) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLayerNormOp(unittest.TestCase): def setUp(self): self.use_cudnn = True @@ -191,8 +189,6 @@ class TestLayerNormOp(unittest.TestCase): self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLayerNormOpFP16(TestLayerNormOp): def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py index 3cdd2448628a0b0f1900cc8b15d884d578a445ca..9534431e99a7a2e0218fe08dfd95a770b9924915 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLog(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestLog(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -60,8 +58,6 @@ class TestLog(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLogFp16(OpTest): def setUp(self): self.set_npu() @@ -85,11 +81,9 @@ class TestLogFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLogNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py index 6d1327f068a528c97b8b3e882bc37f2c07635acb..f695eeb0f27743144bb67486d6435fcc21604617 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py @@ -220,8 +220,6 @@ def type_map_factory(): } for x_type in x_type_list for y_type in y_type_list] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCPU(unittest.TestCase): def test(self): test(self) @@ -235,8 +233,6 @@ class TestCPU(unittest.TestCase): test_type_error(self, False, type_map) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPU(unittest.TestCase): def test(self): test(self, True) diff --git a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py index 41fe0636bd7790433dee33dd358ec7ed6d7ae9e5..56f04a6e993f3aba98d6d3ed874e460888412e1d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() 
SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLookupTableV2(OpTest): def setUp(self): self.set_npu() @@ -67,17 +65,14 @@ class TestLookupTableV2(OpTest): self.dim = 20 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad_with_place( - self.place, ['W'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['W'], 'Out') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLookupTableV2FP16(TestLookupTableV2): no_need_check_grad = True @@ -89,16 +84,12 @@ class TestLookupTableV2FP16(TestLookupTableV2): self.__class__.no_need_check_grad = True -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLookupTableV2Dim32(TestLookupTableV2): def init_dim(self): # embedding_dim is multiple of 32 self.dim = 64 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestLookupTableV2Dim32FP16(TestLookupTableV2): no_need_check_grad = True diff --git a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py index b093fa4f2caa4abc1262c150d84ebff7dae14328..53766c5eb61b7a429636551b5e7b1c926f9b38e4 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py @@ -26,15 +26,13 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): """Reference forward implementation using np.matmul.""" # np.matmul does not support the transpose flags, so we manually # transpose X and Y appropriately. if transpose_X: if X.ndim == 1: X = X.reshape((X.size, )) elif X.ndim == 2: X = X.T else: @@ -43,7 +41,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): X = np.transpose(X, tuple(dim)) if transpose_Y: if Y.ndim == 1: Y = Y.reshape((Y.size, )) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -53,7 +51,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): if not Out.shape: # We do not support 0-dimensional Tensors (scalars). So where # np.matmul outputs a scalar, we must convert to a Tensor of # shape (1, ) instead. # Everywhere else, we are compatible with np.matmul. Out = np.array([Out], dtype="float64") return Out @@ -95,7 +93,7 @@ self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) # TODO(ascendrc): Add grad test @@ -137,8 +135,6 @@ class TestMatMul4(TestMatMul): self.trans_y = False -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMatMulNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() @@ -207,8 +203,8 @@ class TestMatMulNet(unittest.TestCase): # The precision is aligned in NPU and GPU separately, which is only used for the usage method.
-@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestMatMulNet3_2(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py index 6e8f99a9dbb19785094ad6a94d9f371fe409fc69..e69c2fd84dd9dbb8ed31ba4071ef3d22eb4916a8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMean(OpTest): def setUp(self): self.set_npu() @@ -50,15 +48,12 @@ class TestMean(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place( - self.place, ['X'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['X'], 'Out') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMeanFP16(OpTest): def setUp(self): self.set_npu() @@ -81,7 +76,7 @@ class TestMeanFP16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py index 63c4fb8e5885eaa33ba18227c3e89ce3b0c97b84..a421e2667347c5b046e424a59f2efd2022a3199d 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py @@ -28,8 +28,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMemcpy_FillConstant(unittest.TestCase): def get_prog(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py index 07f187a0f0de9d6f570ff610ca412a37bfae895e..cb58a2a8d440936c50b009dbdece5587ee94961f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py @@ -52,30 +52,30 @@ class TestMul(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) def test_check_grad_normal(self): self.check_grad_with_place( - self.place, ['X', 'Y'], + self.place, + ['X', 'Y'], 'Out', - max_relative_error=0.0065, - check_dygraph=False) + max_relative_error=0.0065, ) def test_check_grad_ingore_x(self): self.check_grad_with_place( - self.place, ['Y'], + self.place, + ['Y'], 'Out', no_grad_set=set("X"), - max_relative_error=0.0065, - check_dygraph=False) + max_relative_error=0.0065, ) def test_check_grad_ingore_y(self): self.check_grad_with_place( - self.place, ['X'], + self.place, + ['X'], 'Out', no_grad_set=set("Y"), - max_relative_error=0.0065, - check_dygraph=False) + max_relative_error=0.0065, ) @skip_check_grad_ci( @@ -170,8 +170,6 @@ class TestMul3FP16(TestMul3): pass -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMulNet(unittest.TestCase): def 
init_dtype(self): self.dtype = np.float32 @@ -243,8 +241,6 @@ class TestMulNet(unittest.TestCase): self.assertTrue(np.allclose(npu_loss, cpu_loss)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMulNet3_2(unittest.TestCase): def init_dtype(self): self.dtype = np.float32 @@ -317,8 +313,6 @@ class TestMulNet3_2(unittest.TestCase): self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-5)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestMulNet3_2_xc2(unittest.TestCase): def init_dtype(self): self.dtype = np.float32 diff --git a/python/paddle/fluid/tests/unittests/npu/test_npu_place.py b/python/paddle/fluid/tests/unittests/npu/test_npu_place.py index 3f71fad2b9c1084148d8b0a28e556cc0bf5f366e..91e0c29e10609b7cbe41a657e62d7a1b2f3c8d7d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_npu_place.py +++ b/python/paddle/fluid/tests/unittests/npu/test_npu_place.py @@ -22,8 +22,6 @@ from paddle.fluid import core paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNpuPlace(unittest.TestCase): def test(self): p = core.Place() @@ -33,8 +31,6 @@ class TestNpuPlace(unittest.TestCase): self.assertEqual(p.npu_device_id(), 0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNpuPlaceError(unittest.TestCase): def test_static(self): # NPU is not supported in ParallelExecutor diff --git a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py index 8c67766b31184a36446c4fa39f64f760fa23912c..a188953d70c93cc0853b0f58d65c9a464688c6dc 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestPow(OpTest): def setUp(self): self.set_npu() @@ -50,15 +48,12 @@ class TestPow(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place( - self.place, ['X'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['X'], 'Out') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestPowFp16(OpTest): def setUp(self): self.set_npu() @@ -82,11 +77,9 @@ class TestPowFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestPowNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py index 583a648224d7309bb1f2aa29db6871020091867f..1a30d1395283ec9bc60dcceaa1b5963b9176b60a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py @@ -28,8 +28,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled 
with NPU") class TestAny8DOp(OpTest): def setUp(self): self.set_npu() @@ -46,11 +44,9 @@ class TestAny8DOp(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAnyOpWithDim(OpTest): def setUp(self): self.set_npu() @@ -64,11 +60,9 @@ class TestAnyOpWithDim(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAny8DOpWithDim(OpTest): def setUp(self): self.set_npu() @@ -85,18 +79,16 @@ class TestAny8DOpWithDim(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestAnyOpWithKeepDim(OpTest): def setUp(self): self.set_npu() self.op_type = "reduce_any" self.place = paddle.NPUPlace(0) self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': (1, ), 'keep_dim': True} + self.attrs = {'dim': (1), 'keep_dim': True} self.outputs = { 'Out': np.expand_dims( self.inputs['X'].any(axis=self.attrs['dim']), axis=1) @@ -106,7 +98,7 @@ class TestAnyOpWithKeepDim(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestAny8DOpWithKeepDim(OpTest): @@ -118,7 +110,7 @@ class TestAny8DOpWithKeepDim(OpTest): 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") } - self.attrs = {'dim': (1, ), 'keep_dim': True} + self.attrs = {'dim': (1), 'keep_dim': True} self.outputs = { 'Out': np.expand_dims( self.inputs['X'].any(axis=self.attrs['dim']), axis=1) @@ -128,7 +120,7 @@ class TestAny8DOpWithKeepDim(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py index d3861bf0780cb58f7362ff9dbd05c99a222bc21b..bd7ce2a040c93b406ac136738a4a32a3c32e840c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReduceSum(OpTest): def setUp(self): np.random.seed(SEED) @@ -66,10 +64,10 @@ class TestReduceSum(OpTest): def initTestCase(self): self.shape = (5, 6) - self.axis = (0, ) + self.axis = (0) def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -84,8 +82,6 @@ class TestReduceSum2(OpTest): self.dtype = np.int32 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReduceSumNet(unittest.TestCase): def set_reduce_sum_function(self, x): # keep_dim = False @@ -151,16 +147,12 @@ class 
TestReduceSumNet(unittest.TestCase): self.assertTrue(np.allclose(npu_loss, cpu_loss)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReduceSumNet2(TestReduceSumNet): def set_reduce_sum_function(self, x): # keep_dim = True return paddle.fluid.layers.reduce_sum(x, dim=-1, keep_dim=True) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReduceSumNet3(TestReduceSumNet): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py index 9273d01299d8f564ee0ae575b47bb30e939c3d76..a2547808e6f161ae1cdac5ea5944863d7c640d24 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestRelu(OpTest): def setUp(self): self.set_npu() @@ -50,11 +48,9 @@ class TestRelu(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReluFp16(OpTest): def setUp(self): self.set_npu() @@ -78,11 +74,9 @@ class TestReluFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReluNeg(OpTest): def setUp(self): self.set_npu() @@ -105,13 +99,13 @@ class TestReluNeg(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestReluNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py index 885c990c702bd35d2052b3cb79abf11a74b3efc2..520de15f4df62be60faf910cd8ddb515a66c4f7e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestReshape2(OpTest): def setUp(self): self.set_npu() @@ -51,12 +49,10 @@ class TestReshape2(OpTest): self.infered_shape = (20, 10) def test_check_output(self): - self.check_output_with_place( - self.place, check_dygraph=False, no_check_set=['XShape']) + self.check_output_with_place(self.place, no_check_set=['XShape']) def test_check_grad_normal(self): - self.check_grad_with_place( - self.place, ['X'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['X'], 'Out') class TestReshape2_case2(TestReshape2): diff --git a/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py b/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py index e7e7fb39c913b28f1e3597a5a5ba4f57f98c108d..3bdf8146fb22846369f484f92a6208e3883e65ad 100644 --- 
a/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py @@ -36,56 +36,42 @@ from test_static_save_load import * paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUSaveLoadBase(TestSaveLoadBase): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUSaveLoadPartial(TestSaveLoadPartial): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUSaveLoadSetStateDict(TestSaveLoadSetStateDict): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUProgramStatePartial(TestProgramStatePartial): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPULoadFromOldInterface(TestLoadFromOldInterface): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPULoadFromOldInterfaceSingleFile(TestLoadFromOldInterfaceSingleFile): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUProgramStateOldSave(TestProgramStateOldSave): def setUp(self): self.test_dygraph = False @@ -95,8 +81,6 @@ class TestNPUProgramStateOldSave(TestProgramStateOldSave): ) else paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUProgramStateOldSaveSingleModel(TestProgramStateOldSaveSingleModel): def set_place(self): return fluid.CPUPlace() if not core.is_compiled_with_npu( diff --git a/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py index 9b4547bc24474afccf2454f992a1c92c3dd22605..65ec28fbf7d3a394ce0ac93c81a651445d584c75 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestScale(OpTest): def setUp(self): self.set_npu() @@ -51,7 +49,7 @@ class TestScale(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestFP16Scale(TestScale): @@ -82,7 +80,7 @@ class TestBiasAfterScale(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py index c3e52c9bfad533bdef724cff7e447f991fa2d6b2..c05b53d9a48621c61c8c958c360a13d2e9bf9466 100755 --- 
a/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestCast1(OpTest): def setUp(self): self.set_npu() @@ -49,7 +47,7 @@ class TestCast1(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestCast2(OpTest): @@ -72,7 +70,7 @@ class TestCast2(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestCast3(OpTest): @@ -95,7 +93,7 @@ class TestCast3(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestCast4(OpTest): @@ -119,7 +117,7 @@ class TestCast4(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py index 29914d21e2673ca92440c5346bfc2d6b5522849c..85a1e0594ba945cd267ea886f1979c25f14ef8d8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py @@ -26,8 +26,6 @@ import paddle.fluid.core as core paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSeedOpFixSeed(OpTest): def setUp(self): self.set_npu() @@ -43,8 +41,6 @@ class TestSeedOpFixSeed(OpTest): self.check_output_with_place(paddle.NPUPlace(0)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSeedOpDiffSeed(OpTest): def setUp(self): self.set_npu() diff --git a/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py index af0dea4776d23fdebe26f68b5c84c7d3d07d2940..99061cba8d27000c1917b61a7efeb8215316cec2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py @@ -24,8 +24,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSGD(OpTest): def setUp(self): self.set_npu() @@ -50,11 +48,9 @@ class TestSGD(OpTest): self.w = 15 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py index 7b9a74b2be98dee86f2b3192d746cc56895ca1d9..cb1b0c458fcaa95d16f22c8f61e6bfbf92aae89e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with 
NPU") class TestShape(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestShape(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py index b56ee8c8c0748b1e8afacd5cab3ab1b721d35cfa..5a38f14868bb8a0feb93a47b00b591edaf0a5855 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py @@ -27,8 +27,6 @@ SEED = 2021 EPOCH = 100 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSliceOp(OpTest): def setUp(self): self.op_type = "slice" @@ -60,13 +58,12 @@ class TestSliceOp(OpTest): self.place = paddle.NPUPlace(0) def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad_normal(self): if self.dtype == np.float16: return - self.check_grad_with_place( - self.place, ['Input'], 'Out', check_dygraph=False) + self.check_grad_with_place(self.place, ['Input'], 'Out') class TestSliceOp2(TestSliceOp): @@ -79,8 +76,6 @@ class TestSliceOp2(TestSliceOp): self.out = self.input[:, 0:1, :] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSliceOpFp16(TestSliceOp): def init_dtype(self): self.dtype = np.float16 @@ -147,8 +142,6 @@ class TestSliceOpTensor2(TestSliceOpTensor): self.out = self.input[:, 0:1, :] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSliceOpFp16Tensor(TestSliceOpTensor): def init_dtype(self): self.dtype = np.float16 @@ -237,8 +230,6 @@ class TestSliceOpTensorList2(TestSliceOpTensorList): self.out = self.input[:, 0:1, :] -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSliceOpFp16TensorList(TestSliceOpTensorList): def init_dtype(self): self.dtype = np.float16 @@ -249,8 +240,6 @@ class TestSliceOpFp16TensorList(TestSliceOpTensorList): self.place = paddle.NPUPlace(0) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSliceNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py index c1ba41943a359ba2103bfd34c722c697d6b01b2f..f2a9ef2bee074def9bd5cd8d3f4ae0a6ac913815 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSoftmax(OpTest): def setUp(self): self.set_npu() @@ -51,11 +49,9 @@ class TestSoftmax(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSoftmaxNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py 
b/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py index 2ee089360e6dd2f62aabdd25179e7e9410b365e4..8d78ee6a97efdd1df99c9636e8e18a2905d858a5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py @@ -28,8 +28,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSoftmaxWithCrossEntropyOp(OpTest): def set_npu(self): self.__class__.use_npu = True @@ -86,7 +84,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.attrs['axis'] = self.axis def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) def test_check_grad(self): if self.dtype == np.float16: @@ -95,13 +93,10 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.check_grad_with_place( self.place, ['Logits'], 'Loss', - check_dygraph=False, numeric_grad_delta=0.001, max_relative_error=0.5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestPowNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py index 556fa76424b8b60f2efff371c833f57bdc341e40..acb99746d231ded16032bfdc1839b6b0f3120f62 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSqrt(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestSqrt(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -60,8 +58,6 @@ class TestSqrt(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSqrtFp16(OpTest): def setUp(self): self.set_npu() @@ -85,11 +81,9 @@ class TestSqrtFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSqrtNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py index 8c1a8d0070484a3b536256a6e8aafeb20fcf0ae0..8262c3b94d6a8531fe104eed6c4c447d0e9edef1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSquare(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestSquare(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -60,8 +58,6 
@@ class TestSquare(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSquareFp16(OpTest): def setUp(self): self.set_npu() @@ -85,11 +81,9 @@ class TestSquareFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5) + self.check_output_with_place(self.place, atol=1e-5) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSquareNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py index 7a725b3b9d5d31f174a2b1c818480f95d8c4274d..2e741c8d8a5ef00f23fb5171dff0f5e147203094 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py @@ -26,10 +26,9 @@ from paddle.fluid import Program, program_guard paddle.enable_static() - # Correct: General. -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestSqueezeOp(OpTest): def setUp(self): self.set_npu() @@ -58,8 +57,8 @@ class TestSqueezeOp(OpTest): # Correct: There is mins axis. -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestSqueezeOp1(TestSqueezeOp): def init_test_case(self): self.ori_shape = (1, 3, 1, 40) @@ -68,8 +67,8 @@ class TestSqueezeOp1(TestSqueezeOp): # Correct: No axes input. -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestSqueezeOp2(TestSqueezeOp): def init_test_case(self): self.ori_shape = (1, 20, 1, 5) @@ -78,8 +77,8 @@ class TestSqueezeOp2(TestSqueezeOp): # Correct: Just part of axes be squeezed. -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestSqueezeOp3(TestSqueezeOp): def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) @@ -88,8 +87,8 @@ class TestSqueezeOp3(TestSqueezeOp): # Correct: The demension of axis is not of size 1 remains unchanged. 
-@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") + + class TestSqueezeOp4(TestSqueezeOp): def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py index 721fb95dd9b72f989746bbe1a7e27596a6b18a34..bdfc7a03c6c8324ce8fe8cc4cd211a058d7fcb75 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py @@ -26,8 +26,6 @@ import paddle.fluid.core as core paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOpBase(OpTest): def initDefaultParameters(self): self.num_inputs = 4 @@ -77,50 +75,36 @@ class TestStackOpBase(OpTest): self.check_grad_with_place(self.place, self.get_x_names(), 'Y') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp1(TestStackOpBase): def initParameters(self): self.num_inputs = 16 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp2(TestStackOpBase): def initParameters(self): self.num_inputs = 20 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp3(TestStackOpBase): def initParameters(self): self.axis = -1 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp4(TestStackOpBase): def initParameters(self): self.axis = -4 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp5(TestStackOpBase): def initParameters(self): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp6(TestStackOpBase): def initParameters(self): self.axis = 3 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackAPIWithLoDTensorArray(unittest.TestCase): """ Test stack api when the input(x) is a LoDTensorArray. @@ -157,8 +141,6 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): [self.x] * self.iter_num, axis=self.axis))) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): """ Test stack api when the input(x) is a LoDTensorArray. 
@@ -195,8 +177,6 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): [self.x] * self.iter_num, axis=self.axis))) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class API_test(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -223,8 +203,6 @@ class API_test(unittest.TestCase): self.assertRaises(TypeError, paddle.stack, x) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class API_DygraphTest(unittest.TestCase): def test_out(self): data1 = np.array([[1.0, 2.0]]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py index 21b42814c07b062937019e36c67f236d1c5e1c5d..1ea8504ceec01f7b8349ee3d4837757c4dd5fa29 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestSum1(OpTest): def setUp(self): self.set_npu() @@ -52,7 +50,7 @@ class TestSum1(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestSum2(OpTest): @@ -86,7 +84,7 @@ class TestSum2(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestSum3(OpTest): @@ -111,7 +109,7 @@ class TestSum3(OpTest): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py index 235fa2783fb3c8c507ebfa73c5631c551fce4f1a..55be94da2b7e0346d8c6783d244c9d3a2c43273e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py @@ -26,8 +26,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTanh(OpTest): def setUp(self): self.set_npu() @@ -50,7 +48,7 @@ class TestTanh(OpTest): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) # TODO(ascendrc): Add grad test # def test_check_grad(self): @@ -60,8 +58,6 @@ class TestTanh(OpTest): # -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTanhFp16(OpTest): def setUp(self): self.set_npu() @@ -85,11 +81,9 @@ class TestTanhFp16(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3) + self.check_output_with_place(self.place, atol=1e-3) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTanhNet(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py index 04d4565f7485808daa706a9781c2b7159ab9222a..b735adf76d6c1296197960b084aac660c688c5cd 100644 --- 
a/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py @@ -27,8 +27,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTopk(OpTest): def setUp(self): self.set_npu() @@ -56,11 +54,9 @@ class TestTopk(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTopkV2(OpTest): def setUp(self): self.set_npu() @@ -88,7 +84,7 @@ class TestTopkV2(OpTest): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py index 17f6a0ae1ca9bffcb78b3526f78f9a26e4546fc4..e95f3cc83cfb31ee955d65c9f722eb2c0180db86 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py @@ -25,8 +25,6 @@ import paddle.fluid as fluid paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTransposeOp(OpTest): def setUp(self): self.set_npu() @@ -58,11 +56,9 @@ class TestTransposeOp(OpTest): self.axis = -1 def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTransposeOpFP16(TestTransposeOp): no_need_check_grad = True diff --git a/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py index ff89508d196235a8e50678908938ba0fc24d6981..de94e7febaca7692543cb9748f1481557f53d208 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py @@ -29,8 +29,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestTruncatedNormal(unittest.TestCase): def _test(self, run_npu=True): main_prog = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py index 7c358c244f34ddf1c9b3d9e3f055ea333f9b435f..0e21c59432badc13e13a4eaef7871f8ee888050f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py @@ -39,8 +39,6 @@ def output_hist(out): return hist, prob -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUUniformRandomOp(OpTest): def setUp(self): self.set_npu() @@ -76,8 +74,6 @@ class TestNPUUniformRandomOp(OpTest): hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestNPUUniformRandomOpSelectedRows(unittest.TestCase): def get_places(self): places = [core.CPUPlace()] diff --git 
a/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py index 6dd3c30c272c237b9aff31274b2274cdfa08cf8e..097f31c72467c049dca6f246343a7c79f6ad4561 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py @@ -24,8 +24,6 @@ import paddle paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestUnStackOpBase(OpTest): def initDefaultParameters(self): self.input_dim = (5, 6, 7) @@ -75,29 +73,21 @@ class TestUnStackOpBase(OpTest): self.check_grad_with_place(self.place, ['X'], self.get_y_names()) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp3(TestUnStackOpBase): def initParameters(self): self.axis = -1 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp4(TestUnStackOpBase): def initParameters(self): self.axis = -3 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp5(TestUnStackOpBase): def initParameters(self): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestStackOp6(TestUnStackOpBase): def initParameters(self): self.axis = 2 diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py index cae3239229f441812a42be11ee8d8f34253cff05..1388adf609ff62a48832836e461debe09fc9bdca 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py @@ -25,8 +25,6 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestUpdateLossScalingOp(OpTest): def setUp(self): self.set_npu() @@ -71,7 +69,7 @@ class TestUpdateLossScalingOp(OpTest): } def test_check_output(self): - self.check_output_with_place(self.place, check_dygraph=False) + self.check_output_with_place(self.place) class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): @@ -103,8 +101,6 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): } -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") class TestUpdateLossScalingLayer(unittest.TestCase): def loss_scaling_check(self, use_npu=True, scope=fluid.Scope()): a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')