diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py
index 47da4fdb23ec49924fbfb1b5cc4b02e2355d287e..6a82157faaec41d9abaffa9b68e3a3e80b6b2fb3 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py
@@ -64,28 +64,28 @@ class TestElementwiseAddOp(OpTest):
     def test_check_output(self):
         self.check_output_with_place(self.place, check_dygraph=False)
 
-    # TODO(ascendrc): Test grad op after it is implemented.
-    # def test_check_grad_normal(self):
-    #     self.check_grad_with_place(
-    #         self.place, ['X', 'Y'],
-    #         'Out',
-    #         max_relative_error=0.006,
-    #         check_dygraph=False)
-    #
-    # def test_check_grad_ingore_x(self):
-    #     self.check_grad_with_place(
-    #         self.place, ['Y'],
-    #         'Out',
-    #         no_grad_set=set("X"),
-    #         max_relative_error=0.006,
-    #         check_dygraph=False)
-    #
-    # def test_check_grad_ingore_y(self):
-    #     self.check_grad_with_place(
-    #         self.place, ['X'],
-    #         'Out',
-    #         no_grad_set=set("Y"),
-    #         max_relative_error=0.006,check_dygraph=False)
+    def test_check_grad_normal(self):
+        self.check_grad_with_place(
+            self.place, ['X', 'Y'],
+            'Out',
+            max_relative_error=0.006,
+            check_dygraph=False)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad_with_place(
+            self.place, ['Y'],
+            'Out',
+            no_grad_set=set("X"),
+            max_relative_error=0.006,
+            check_dygraph=False)
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad_with_place(
+            self.place, ['X'],
+            'Out',
+            no_grad_set=set("Y"),
+            max_relative_error=0.006,
+            check_dygraph=False)
 
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
@@ -133,10 +133,6 @@ class TestAddAPI(unittest.TestCase):
             True,
             msg="z_value = {}, but expected {}".format(z_value, z_expected))
 
-    def test_backward(self):
-        # TODO(ascendrc): Test backward after add grad npu op implemented.
-        pass
-
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
                  "core is not compiled with NPU")
diff --git a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py
index add2a1932b1aa9c37dded9f5e3baf2cba8f5ae8f..8c67766b31184a36446c4fa39f64f760fa23912c 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py
@@ -52,12 +52,9 @@ class TestPow(OpTest):
     def test_check_output(self):
         self.check_output_with_place(self.place, check_dygraph=False)
 
-    # TODO(ascendrc): Add grad test
-    # def test_check_grad(self):
-    #     if self.dtype == np.float16:
-    #         return
-    #     self.check_grad(['X'], 'Out')
-    #
+    def test_check_grad(self):
+        self.check_grad_with_place(
+            self.place, ['X'], 'Out', check_dygraph=False)
 
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
diff --git a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
index 3e8f424129a0b9d25d2b4303b59cac75cf7e37e7..1e30bb0078213f3d91511e2281cf0b134b49c24c 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
@@ -62,6 +62,10 @@ class TestSliceOp(OpTest):
     def test_check_output(self):
         self.check_output_with_place(self.place, check_dygraph=False)
 
+    def test_check_grad_normal(self):
+        self.check_grad_with_place(
+            self.place, ['Input'], 'Out', check_dygraph=False)
+
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
                  "core is not compiled with NPU")
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index efce2e770b1b16d1d70c86211b396b0a060b520e..ae8f64987898d35c7f1960c4f10b5628edfd6f51 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -1416,9 +1416,17 @@ class OpTest(unittest.TestCase):
         if not type(output_names) is list:
             output_names = [output_names]
 
+        # FIXME: Replace numeric_place with place to calculate numeric_grads.
+        # NOTE(liym27): There is an unknown error when call op.run() on NPUPlace, which
+        # needs to be fixed.
+        if self.__class__.use_npu == True:
+            numeric_place = paddle.CPUPlace()
+        else:
+            numeric_place = place
+
         numeric_grads = user_defined_grads or [
             get_numeric_gradient(
-                place,
+                numeric_place,
                 self.scope,
                 self.op,
                 self.inputs,
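
Context for the op_test.py hunk above: the grad checks enabled in these NPU tests compare analytic gradients computed on `self.place` (an NPUPlace) against numeric finite-difference gradients, and the patch routes only the numeric side to CPU because `op.run()` currently errors on NPUPlace. Below is a minimal sketch of that place-selection branch in isolation; the helper name `select_numeric_place` is hypothetical (op_test.py inlines the branch directly before calling `get_numeric_gradient`).

```python
# Sketch of the numeric-gradient place fallback added in op_test.py.
# `select_numeric_place` is a hypothetical helper for illustration only;
# the real code inlines this branch inside OpTest.check_grad_with_place.
import paddle


def select_numeric_place(use_npu, place):
    """Pick the place used to compute numeric (finite-difference) gradients.

    Analytic gradients still run on `place`; numeric gradients fall back to
    CPU when the test class targets NPU, since op.run() fails on NPUPlace.
    """
    if use_npu:
        return paddle.CPUPlace()
    return place


if __name__ == "__main__":
    # For an NPU test class the numeric side is evaluated on CPU;
    # otherwise the original place is passed through unchanged.
    # (CPUPlace is used as the sample `place` so this runs on any build.)
    print(select_numeric_place(True, paddle.CPUPlace()))
    print(select_numeric_place(False, paddle.CPUPlace()))
```

The diff then passes `numeric_place` (rather than `place`) only into `get_numeric_gradient`, so the analytic/numeric comparison in `check_grad_with_place` is otherwise unchanged.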