Unverified commit 712f9fe5, authored by Leo Chen, committed by GitHub

[NPU] Refine npu unit tests (#34240)

* add NPU unittests only if WITH_ASCEND_CL is ON

* remove @unittest.skipIf, since these unittests are only created when WITH_ASCEND_CL is ON

* enable dygraph checks for NPU tests
Parent f50a67eb
file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
if (WITH_ASCEND_CL)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endif()
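The CMake change above registers the tests only when WITH_ASCEND_CL is ON; the per-file diffs below then drop the now-redundant runtime guard and stop disabling dygraph checking. A hedged before/after sketch of that pattern (TestFooNPU is an illustrative name, not a class from this PR):

import unittest
import paddle
from op_test import OpTest

# Before: every NPU test carried a runtime guard and skipped the dygraph path.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestFooNPUOld(OpTest):
    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)

# After: no guard (CMake only creates the test when WITH_ASCEND_CL is ON),
# and check_dygraph is left at its default of True, so the dygraph kernel
# path is exercised as well.
class TestFooNPU(OpTest):
    def test_check_output(self):
        self.check_output_with_place(self.place)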
......@@ -25,8 +25,6 @@ import paddle.fluid as fluid
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUAbs(OpTest):
def setUp(self):
self.op_type = "abs"
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAccuracy(OpTest):
def setUp(self):
self.op_type = "accuracy"
......@@ -60,7 +58,7 @@ class TestAccuracy(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestAccuracy2(TestAccuracy):
......
......@@ -25,8 +25,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAdam(OpTest):
def setUp(self):
self.set_npu()
......@@ -75,11 +73,9 @@ class TestAdam(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAdamWithEpsilonTensor(OpTest):
def setUp(self):
self.set_npu()
......@@ -131,11 +127,9 @@ class TestAdamWithEpsilonTensor(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAdamOpWithSkipUpdate(OpTest):
def setUp(self):
self.set_npu()
......@@ -185,11 +179,9 @@ class TestAdamOpWithSkipUpdate(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAdamOpWithGlobalBetaPow(OpTest):
def setUp(self):
self.set_npu()
......@@ -244,11 +236,9 @@ class TestAdamOpWithGlobalBetaPow(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5, check_dygraph=False)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......@@ -309,8 +299,6 @@ class TestNet(unittest.TestCase):
self.assertTrue(np.allclose(npu_loss, cpu_loss, rtol=1e-3))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNetWithEpsilonTensor(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -25,8 +25,6 @@ from paddle.fluid.contrib.mixed_precision.amp_nn import check_finite_and_unscale
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCheckFiniteAndUnscale(unittest.TestCase):
def get_prog(self):
paddle.enable_static()
......@@ -39,11 +37,11 @@ class TestCheckFiniteAndUnscale(unittest.TestCase):
name="status", shape=[8], dtype='float32')
main_program.global_block().append_op(
type="alloc_float_status",
outputs={"FloatStatus": float_status}, )
outputs={"FloatStatus": float_status})
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status}, )
outputs={"FloatStatusOut": float_status})
c = paddle.fluid.layers.elementwise_div(a, b)
out, found_inf = check_finite_and_unscale(
[c], scale, float_status=float_status)
......@@ -95,8 +93,6 @@ class TestCheckFiniteAndUnscale(unittest.TestCase):
self.assertFalse(found_inf[0])
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
def get_prog(self):
paddle.enable_static()
......@@ -109,21 +105,21 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
name="status", shape=[8], dtype='float32')
main_program.global_block().append_op(
type="alloc_float_status",
outputs={"FloatStatus": float_status}, )
outputs={"FloatStatus": float_status})
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status}, )
outputs={"FloatStatusOut": float_status})
c = paddle.fluid.layers.elementwise_div(a, b)
out, found_inf = check_finite_and_unscale(
[c], scale, float_status=float_status)
main_program.global_block().append_op(
type="alloc_float_status",
outputs={"FloatStatus": float_status}, )
outputs={"FloatStatus": float_status})
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status}, )
outputs={"FloatStatusOut": float_status})
d = paddle.fluid.layers.elementwise_add(a, b)
out, found_inf = check_finite_and_unscale(
[d], scale, float_status=float_status)
......
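For readers unfamiliar with the float-status machinery above: on Ascend, alloc_float_status and clear_float_status manage a hardware overflow-status buffer, and check_finite_and_unscale divides its inputs by the loss scale while reporting whether any value is non-finite. The property the test asserts, in plain numpy terms (illustrative, not Paddle's implementation):

import numpy as np

x = np.array([1.0, 2.0, np.inf], dtype=np.float32)
scale = np.float32(2.0)
out = x / scale                          # the "unscale" step
found_inf = not np.isfinite(out).all()   # True iff any value overflowed
assert found_inf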
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAssign(OpTest):
def setUp(self):
self.set_npu()
......@@ -49,7 +47,7 @@ class TestAssign(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -147,8 +147,6 @@ def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None):
return expected
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestBCELoss(unittest.TestCase):
def test_BCELoss(self):
input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32)
......@@ -220,8 +218,6 @@ def bce_loss(input, label):
return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestBceLossOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -248,15 +244,11 @@ class TestBceLossOp(OpTest):
self.shape = [10, 10]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestBceLossOpCase1(OpTest):
def init_test_cast(self):
self.shape = [2, 3, 4, 5]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestBceLossOpCase2(OpTest):
def init_test_cast(self):
self.shape = [2, 3, 20]
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCast1(OpTest):
def setUp(self):
self.set_npu()
......@@ -48,7 +46,7 @@ class TestCast1(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestCast2(OpTest):
......@@ -70,7 +68,7 @@ class TestCast2(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
class TestCast3(OpTest):
......@@ -92,7 +90,7 @@ class TestCast3(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
if __name__ == '__main__':
......
......@@ -28,8 +28,6 @@ SEED = 2021
alignment = 512
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAllocContinuousSpace(OpTest):
def setUp(self):
self.__class__.use_npu = True
......@@ -82,12 +80,9 @@ class TestAllocContinuousSpace(OpTest):
self.check_output_with_place(
place=paddle.NPUPlace(0),
no_check_set=["FusedOutput"],
atol=1e-5,
check_dygraph=False)
atol=1e-5, )
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAllocContinuousSpace2(TestAllocContinuousSpace):
def init_attr(self):
return {
......@@ -102,8 +97,7 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace):
self.check_output_with_place(
place=paddle.NPUPlace(0),
no_check_set=["FusedOutput"],
atol=1e-5,
check_dygraph=False)
atol=1e-5, )
if __name__ == '__main__':
......
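FusedOutput is listed in no_check_set because coalesce_tensor packs all inputs into one buffer whose layout depends on padding; the alignment constant of 512 above drives that padding. A rough sketch of the offset arithmetic (the exact padding rule is an assumption, for illustration only):

def aligned_numel(numel, elem_bytes, alignment=512):
    # round the tensor's byte footprint up to the next alignment boundary,
    # then convert back to an element count
    padded_bytes = ((numel * elem_bytes + alignment - 1) // alignment) * alignment
    return padded_bytes // elem_bytes

assert aligned_numel(100, 4) == 128  # 400 bytes pad to 512 bytes = 128 floats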
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestEqual(OpTest):
def setUp(self):
self.set_npu()
......@@ -53,11 +51,9 @@ class TestEqual(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLessthan(OpTest):
def setUp(self):
self.set_npu()
......@@ -83,7 +79,7 @@ class TestLessthan(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestEqual2(TestEqual):
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestConcat(OpTest):
def setUp(self):
self.set_npu()
......@@ -56,7 +54,7 @@ class TestConcat(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def init_test_data(self):
self.x0 = np.random.random((1, 4, 50)).astype(self.dtype)
......@@ -65,12 +63,9 @@ class TestConcat(OpTest):
self.axis = 0
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['x0', 'x2'], 'Out', check_dygraph=False)
self.check_grad_with_place(
self.place, ['x1'], 'Out', check_dygraph=False)
self.check_grad_with_place(
self.place, ['x2'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['x0', 'x2'], 'Out')
self.check_grad_with_place(self.place, ['x1'], 'Out')
self.check_grad_with_place(self.place, ['x2'], 'Out')
class TestConcatFP16(OpTest):
......@@ -102,7 +97,7 @@ class TestConcatFP16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def init_test_data(self):
self.x0 = np.random.random((1, 4, 50)).astype(self.dtype)
......
......@@ -28,8 +28,6 @@ SEED = 2021
EPOCH = 100
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOp(OpTest):
def setUp(self):
self.op_type = "dropout"
......@@ -55,17 +53,14 @@ class TestDropoutOp(OpTest):
self.place = paddle.NPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['X'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOpInput1d(TestDropoutOp):
# change input shape
def setUp(self):
......@@ -85,15 +80,13 @@ class TestDropoutOpInput1d(TestDropoutOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOpInput1d(TestDropoutOp):
# the input is 1-D
def setUp(self):
self.op_type = "dropout"
self.set_npu()
self.init_dtype()
self.inputs = {'X': np.random.random((2000, )).astype(self.dtype)}
self.inputs = {'X': np.random.random((2000)).astype(self.dtype)}
self.attrs = {
'dropout_prob': 0.0,
'fix_seed': True,
......@@ -106,8 +99,6 @@ class TestDropoutOpInput1d(TestDropoutOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOp2(TestDropoutOp):
# the dropout_prob is 1.0
def setUp(self):
......@@ -127,8 +118,6 @@ class TestDropoutOp2(TestDropoutOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOp3(TestDropoutOp):
# the input dim is 3
def setUp(self):
......@@ -148,8 +137,6 @@ class TestDropoutOp3(TestDropoutOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOpInference(OpTest):
# is_test = True
......@@ -174,11 +161,9 @@ class TestDropoutOpInference(OpTest):
self.place = paddle.NPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestDropoutOpInference2(TestDropoutOpInference):
def setUp(self):
......@@ -194,8 +179,6 @@ class TestDropoutOpInference2(TestDropoutOpInference):
self.outputs = {'Out': self.inputs['X']}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOpWithSeed(TestDropoutOp):
# the seed is a Tensor
def setUp(self):
......@@ -218,8 +201,6 @@ class TestDropoutOpWithSeed(TestDropoutOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutOpFp16(TestDropoutOp):
# float16
def init_dtype(self):
......@@ -231,8 +212,6 @@ class TestDropoutOpFp16(TestDropoutOp):
self.place = paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestDropoutAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
......
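As background for the cases above: with dropout_prob 0.0 the op is the identity and the mask is all ones, with dropout_prob 1.0 the output is all zeros, and is_test=True skips masking entirely. A hedged numpy reference, assuming the upscale_in_train variant of dropout:

import numpy as np

def dropout_ref(x, mask, dropout_prob, is_test=False):
    # assumption: 'upscale_in_train' semantics — kept values are scaled by
    # 1/(1-p) during training, and inference returns the input unchanged
    if is_test:
        return x
    if dropout_prob == 1.0:
        return np.zeros_like(x)
    return x * mask / (1.0 - dropout_prob)

x = np.random.random((2000,)).astype(np.float32)
assert np.allclose(dropout_ref(x, np.ones_like(x), 0.0), x)  # prob 0.0: identity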
......@@ -25,8 +25,6 @@ import paddle.fluid as fluid
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseAddOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -62,34 +60,32 @@ class TestElementwiseAddOp(OpTest):
self.axis = -1
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(
self.place, ['X', 'Y'],
self.place,
['X', 'Y'],
'Out',
max_relative_error=0.006,
check_dygraph=False)
max_relative_error=0.006, )
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
self.place, ['Y'],
self.place,
['Y'],
'Out',
no_grad_set=set("X"),
max_relative_error=0.006,
check_dygraph=False)
max_relative_error=0.006, )
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
self.place, ['X'],
self.place,
['X'],
'Out',
no_grad_set=set("Y"),
max_relative_error=0.006,
check_dygraph=False)
max_relative_error=0.006, )
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAddAPI(unittest.TestCase):
def test_name(self):
with paddle.static.program_guard(paddle.static.Program()):
......@@ -134,8 +130,6 @@ class TestAddAPI(unittest.TestCase):
msg="z_value = {}, but expected {}".format(z_value, z_expected))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAddError(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
......
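The grad calls above (and the many similar ones below) rely on check_grad_with_place comparing an analytic gradient against a central-difference estimate within max_relative_error. A minimal sketch of that idea (not Paddle's implementation):

import numpy as np

def numeric_grad(f, x, h=1e-4):
    # central difference: df/dx[i] ~ (f(x + h*e_i) - f(x - h*e_i)) / (2h)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        orig = x[i]
        x[i] = orig + h
        fp = f(x)
        x[i] = orig - h
        fm = f(x)
        x[i] = orig
        grad[i] = (fp - fm) / (2.0 * h)
        it.iternext()
    return grad

# For Out = X + Y summed to a scalar, d(sum)/dX is all ones.
x, y = np.random.rand(3, 4), np.random.rand(3, 4)
assert np.allclose(numeric_grad(lambda t: (t + y).sum(), x), np.ones_like(x), rtol=1e-3)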
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseDiv(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,30 +52,28 @@ class TestElementwiseDiv(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(
self.place, ['X', 'Y'],
self.place,
['X', 'Y'],
'Out',
max_relative_error=0.007,
check_dygraph=False)
max_relative_error=0.007, )
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
self.place, ['Y'],
self.place,
['Y'],
'Out',
max_relative_error=0.007,
no_grad_set=set("X"),
check_dygraph=False)
no_grad_set=set("X"), )
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', no_grad_set=set("Y"), check_dygraph=False)
self.place, ['X'], 'Out', no_grad_set=set("Y"))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseDivFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -105,11 +101,9 @@ class TestElementwiseDivFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseDivNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -24,8 +24,6 @@ import paddle
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseFloorDiv(OpTest):
def setUp(self):
self.op_type = "elementwise_floordiv"
......@@ -53,11 +51,9 @@ class TestElementwiseFloorDiv(OpTest):
self.dtype = "int64"
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseFloorDiv2(TestElementwiseFloorDiv):
def init_dtype(self):
self.dtype = "int32"
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMax(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,7 +52,7 @@ class TestElementwiseMax(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Max grad test
# def test_check_grad(self):
......@@ -64,8 +62,6 @@ class TestElementwiseMax(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMaxFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -93,11 +89,9 @@ class TestElementwiseMaxFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMaxNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMin(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,7 +52,7 @@ class TestElementwiseMin(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Min grad test
# def test_check_grad(self):
......@@ -64,8 +62,6 @@ class TestElementwiseMin(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMinFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -93,11 +89,9 @@ class TestElementwiseMinFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMinNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMul(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,7 +52,7 @@ class TestElementwiseMul(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Mul grad test
# def test_check_grad(self):
......@@ -64,8 +62,6 @@ class TestElementwiseMul(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMulFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -93,11 +89,9 @@ class TestElementwiseMulFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseMulNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwisePow(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,7 +52,7 @@ class TestElementwisePow(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Pow grad test
# def test_check_grad(self):
......@@ -64,8 +62,6 @@ class TestElementwisePow(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwisePowFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -93,11 +89,9 @@ class TestElementwisePowFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwisePowNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestElementwiseSubOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -64,7 +62,7 @@ class TestElementwiseSubOp(OpTest):
self.axis = 0
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): For grad tests, OpTest raises FatalError:Segmentation fault
# when calling op.run, which may be caused by a system environment exception
......@@ -74,7 +72,7 @@ class TestElementwiseSubOp(OpTest):
# self.place, ['X', 'Y'],
# 'Out',
# max_relative_error=0.006,
# check_dygraph=False)
# )
#
# def test_check_grad_ingore_x(self):
# self.check_grad_with_place(
......@@ -82,18 +80,16 @@ class TestElementwiseSubOp(OpTest):
# 'Out',
# no_grad_set=set("X"),
# max_relative_error=0.006,
# check_dygraph=False)
# )
#
# def test_check_grad_ingore_y(self):
# self.check_grad_with_place(
# self.place, ['X'],
# 'Out',
# no_grad_set=set("Y"),
# max_relative_error=0.006,check_dygraph=False)
# max_relative_error=0.006,)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSubtractAPI(unittest.TestCase):
def test_name(self):
with paddle.static.program_guard(paddle.static.Program()):
......@@ -138,8 +134,6 @@ class TestSubtractAPI(unittest.TestCase):
msg="z_value = {}, but expected {}".format(z_value, z_expected))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSubtractError(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
......@@ -158,8 +152,6 @@ class TestSubtractError(unittest.TestCase):
self.assertRaises(TypeError, paddle.subtract, x2, y2)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSubtractNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestExpand(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestExpand(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -60,8 +58,6 @@ class TestExpand(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestExpandV2(TestExpand):
def setUp(self):
self.set_npu()
......@@ -82,8 +78,6 @@ class TestExpandV2(TestExpand):
self.outputs = {'Out': out}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestExpandFp16(TestExpand):
no_need_check_grad = True
......@@ -91,8 +85,6 @@ class TestExpandFp16(TestExpand):
self.dtype = np.float16
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestExpandNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestFillConstant(OpTest):
def setUp(self):
self.set_npu()
......@@ -47,7 +45,7 @@ class TestFillConstant(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestFillConstantInt(OpTest):
......@@ -71,7 +69,7 @@ class TestFillConstantInt(OpTest):
self.dtype = np.int32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestFillConstantFP16(OpTest):
......@@ -95,7 +93,7 @@ class TestFillConstantFP16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
if __name__ == '__main__':
......
......@@ -34,8 +34,6 @@ def gather_numpy(x, index, axis):
return gather
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestGatherOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -53,14 +51,14 @@ class TestGatherOp(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'],
self.place,
['X'],
'Out',
max_relative_error=0.006,
check_dygraph=False)
max_relative_error=0.006, )
def config(self):
"""
......@@ -72,8 +70,6 @@ class TestGatherOp(OpTest):
self.index_type = "int32"
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCase1(TestGatherOp):
def config(self):
"""
......@@ -85,8 +81,6 @@ class TestCase1(TestGatherOp):
self.index_type = "int32"
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class API_TestGather(unittest.TestCase):
def test_out1(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
......@@ -120,8 +114,6 @@ class API_TestGather(unittest.TestCase):
self.assertTrue(np.allclose(result, expected_output))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestGatherGrad(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
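The body of gather_numpy is elided in this view; the usual reference (an assumption, matching the helper's signature) selects entries of x along axis by an integer index:

import numpy as np

def gather_ref(x, index, axis):
    # move 'axis' to the front, index the leading dimension, move it back
    x_t = np.swapaxes(x, 0, axis)
    return np.swapaxes(x_t[index], 0, axis)

x = np.arange(12).reshape(3, 4)
assert (gather_ref(x, np.array([2, 0]), axis=0) == x[[2, 0]]).all()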
......@@ -26,8 +26,6 @@ from test_gaussian_random_op import TestGaussianRandomOp
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUGaussianRandomOp(OpTest):
def setUp(self):
self.set_npu()
......
......@@ -32,8 +32,6 @@ def np_gelu(x):
return y
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestGelu(OpTest):
def setUp(self):
self.set_npu()
......@@ -56,18 +54,13 @@ class TestGelu(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'],
'Out',
check_dygraph=False,
max_relative_error=0.007)
self.place, ['X'], 'Out', max_relative_error=0.007)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestGeluFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -91,11 +84,9 @@ class TestGeluFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestGeluNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
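np_gelu's body is truncated above; a typical reference is the tanh approximation of GELU (the exact variant used by np_gelu is an assumption here):

import numpy as np

def gelu_tanh_ref(x):
    # gelu(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1.0 + np.tanh(
        np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))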
......@@ -29,8 +29,6 @@ SEED = 2021
NPUPlace = 0
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestIncrement(OpTest):
def setUp(self):
self.set_npu()
......@@ -54,11 +52,9 @@ class TestIncrement(OpTest):
self.dtype = np.int64
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestIncrementFP16(OpTest):
def setUp(self):
self.set_npu()
......@@ -82,11 +78,9 @@ class TestIncrementFP16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestIncrementInplace(unittest.TestCase):
def test_npu(self):
main_prog = paddle.static.Program()
......
......@@ -36,8 +36,6 @@ from op_test import _set_use_system_allocator
_set_use_system_allocator(False)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLayerNormOp(unittest.TestCase):
def setUp(self):
self.use_cudnn = True
......@@ -191,8 +189,6 @@ class TestLayerNormOp(unittest.TestCase):
self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLayerNormOpFP16(TestLayerNormOp):
def init_dtype(self):
self.dtype = np.float16
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLog(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestLog(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -60,8 +58,6 @@ class TestLog(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLogFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -85,11 +81,9 @@ class TestLogFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLogNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -220,8 +220,6 @@ def type_map_factory():
} for x_type in x_type_list for y_type in y_type_list]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCPU(unittest.TestCase):
def test(self):
test(self)
......@@ -235,8 +233,6 @@ class TestCPU(unittest.TestCase):
test_type_error(self, False, type_map)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPU(unittest.TestCase):
def test(self):
test(self, True)
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLookupTableV2(OpTest):
def setUp(self):
self.set_npu()
......@@ -67,17 +65,14 @@ class TestLookupTableV2(OpTest):
self.dim = 20
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['W'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['W'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLookupTableV2FP16(TestLookupTableV2):
no_need_check_grad = True
......@@ -89,16 +84,12 @@ class TestLookupTableV2FP16(TestLookupTableV2):
self.__class__.no_need_check_grad = True
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLookupTableV2Dim32(TestLookupTableV2):
def init_dim(self):
# embedding_dim is multiple of 32
self.dim = 64
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestLookupTableV2Dim32FP16(TestLookupTableV2):
no_need_check_grad = True
......
......@@ -26,15 +26,13 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
"""Reference forward implementation using np.matmul."""
# np.matmul does not support the transpose flags, so we manually
# transpose X and Y appropriately.
if transpose_X:
if X.ndim == 1:
X = X.reshape((X.size, ))
X = X.reshape((X.size))
elif X.ndim == 2:
X = X.T
else:
......@@ -43,7 +41,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
X = np.transpose(X, tuple(dim))
if transpose_Y:
if Y.ndim == 1:
Y = Y.reshape((Y.size, ))
Y = Y.reshape((Y.size))
else:
dim = [i for i in range(len(Y.shape))]
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
......@@ -53,7 +51,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
if not Out.shape:
# We do not support 0-dimensional Tensors (scalars). So where
# np.matmul outputs a scalar, we must convert to a Tensor of
# shape (1, ) instead.
# shape (1) instead.
# Everywhere else, we are compatible with np.matmul.
Out = np.array([Out], dtype="float64")
return Out
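A quick illustrative check of the transpose handling in reference_matmul above:

import numpy as np

X = np.random.rand(3, 4)
Y = np.random.rand(3, 5)
# transpose_X turns the (3, 4) operand into (4, 3), so the product is (4, 5)
out = reference_matmul(X, Y, transpose_X=True)
assert out.shape == (4, 5)
assert np.allclose(out, np.matmul(X.T, Y))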
......@@ -95,7 +93,7 @@ class TestMatMul(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
# TODO(ascendrc): Add grad test
......@@ -137,8 +135,6 @@ class TestMatMul4(TestMatMul):
self.trans_y = False
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMatMulNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......@@ -207,8 +203,8 @@ class TestMatMulNet(unittest.TestCase):
# Precision is aligned separately on NPU and GPU; this test only demonstrates the usage.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMatMulNet3_2(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMean(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,15 +48,12 @@ class TestMean(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMeanFP16(OpTest):
def setUp(self):
self.set_npu()
......@@ -81,7 +76,7 @@ class TestMeanFP16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -28,8 +28,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMemcpy_FillConstant(unittest.TestCase):
def get_prog(self):
paddle.enable_static()
......
......@@ -52,30 +52,30 @@ class TestMul(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
def test_check_grad_normal(self):
self.check_grad_with_place(
self.place, ['X', 'Y'],
self.place,
['X', 'Y'],
'Out',
max_relative_error=0.0065,
check_dygraph=False)
max_relative_error=0.0065, )
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
self.place, ['Y'],
self.place,
['Y'],
'Out',
no_grad_set=set("X"),
max_relative_error=0.0065,
check_dygraph=False)
max_relative_error=0.0065, )
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
self.place, ['X'],
self.place,
['X'],
'Out',
no_grad_set=set("Y"),
max_relative_error=0.0065,
check_dygraph=False)
max_relative_error=0.0065, )
@skip_check_grad_ci(
......@@ -170,8 +170,6 @@ class TestMul3FP16(TestMul3):
pass
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMulNet(unittest.TestCase):
def init_dtype(self):
self.dtype = np.float32
......@@ -243,8 +241,6 @@ class TestMulNet(unittest.TestCase):
self.assertTrue(np.allclose(npu_loss, cpu_loss))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMulNet3_2(unittest.TestCase):
def init_dtype(self):
self.dtype = np.float32
......@@ -317,8 +313,6 @@ class TestMulNet3_2(unittest.TestCase):
self.assertTrue(np.allclose(npu_loss, cpu_loss, atol=1e-5))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestMulNet3_2_xc2(unittest.TestCase):
def init_dtype(self):
self.dtype = np.float32
......
......@@ -22,8 +22,6 @@ from paddle.fluid import core
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNpuPlace(unittest.TestCase):
def test(self):
p = core.Place()
......@@ -33,8 +31,6 @@ class TestNpuPlace(unittest.TestCase):
self.assertEqual(p.npu_device_id(), 0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNpuPlaceError(unittest.TestCase):
def test_static(self):
# NPU is not supported in ParallelExecutor
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestPow(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,15 +48,12 @@ class TestPow(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestPowFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -82,11 +77,9 @@ class TestPowFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestPowNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -28,8 +28,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAny8DOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -46,11 +44,9 @@ class TestAny8DOp(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.set_npu()
......@@ -64,11 +60,9 @@ class TestAnyOpWithDim(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.set_npu()
......@@ -85,18 +79,16 @@ class TestAny8DOpWithDim(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
......@@ -106,7 +98,7 @@ class TestAnyOpWithKeepDim(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestAny8DOpWithKeepDim(OpTest):
......@@ -118,7 +110,7 @@ class TestAny8DOpWithKeepDim(OpTest):
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
......@@ -128,7 +120,7 @@ class TestAny8DOpWithKeepDim(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
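One subtlety in the hunks above: the formatter rewrote 'dim': (1, ) as 'dim': (1), and (1) is just the int 1, not a one-element tuple. For numpy reductions the two are equivalent, which is presumably why the expected outputs still match; a quick check:

import numpy as np

x = np.random.randint(0, 2, (5, 6, 10)).astype(bool)
# reducing over axis=(1,) and axis=1 agrees for a single axis
assert (x.any(axis=(1,)) == x.any(axis=1)).all()
assert np.expand_dims(x.any(axis=1), axis=1).shape == (5, 1, 10)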
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReduceSum(OpTest):
def setUp(self):
np.random.seed(SEED)
......@@ -66,10 +64,10 @@ class TestReduceSum(OpTest):
def initTestCase(self):
self.shape = (5, 6)
self.axis = (0, )
self.axis = (0)
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -84,8 +82,6 @@ class TestReduceSum2(OpTest):
self.dtype = np.int32
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReduceSumNet(unittest.TestCase):
def set_reduce_sum_function(self, x):
# keep_dim = False
......@@ -151,16 +147,12 @@ class TestReduceSumNet(unittest.TestCase):
self.assertTrue(np.allclose(npu_loss, cpu_loss))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReduceSumNet2(TestReduceSumNet):
def set_reduce_sum_function(self, x):
# keep_dim = True
return paddle.fluid.layers.reduce_sum(x, dim=-1, keep_dim=True)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReduceSumNet3(TestReduceSumNet):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestRelu(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,11 +48,9 @@ class TestRelu(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReluFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -78,11 +74,9 @@ class TestReluFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReluNeg(OpTest):
def setUp(self):
self.set_npu()
......@@ -105,13 +99,13 @@ class TestReluNeg(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
#
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReluNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestReshape2(OpTest):
def setUp(self):
self.set_npu()
......@@ -51,12 +49,10 @@ class TestReshape2(OpTest):
self.infered_shape = (20, 10)
def test_check_output(self):
self.check_output_with_place(
self.place, check_dygraph=False, no_check_set=['XShape'])
self.check_output_with_place(self.place, no_check_set=['XShape'])
def test_check_grad_normal(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['X'], 'Out')
class TestReshape2_case2(TestReshape2):
......
......@@ -36,56 +36,42 @@ from test_static_save_load import *
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUSaveLoadBase(TestSaveLoadBase):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUSaveLoadPartial(TestSaveLoadPartial):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUSaveLoadSetStateDict(TestSaveLoadSetStateDict):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUProgramStatePartial(TestProgramStatePartial):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPULoadFromOldInterface(TestLoadFromOldInterface):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPULoadFromOldInterfaceSingleFile(TestLoadFromOldInterfaceSingleFile):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUProgramStateOldSave(TestProgramStateOldSave):
def setUp(self):
self.test_dygraph = False
......@@ -95,8 +81,6 @@ class TestNPUProgramStateOldSave(TestProgramStateOldSave):
) else paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUProgramStateOldSaveSingleModel(TestProgramStateOldSaveSingleModel):
def set_place(self):
return fluid.CPUPlace() if not core.is_compiled_with_npu(
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestScale(OpTest):
def setUp(self):
self.set_npu()
......@@ -51,7 +49,7 @@ class TestScale(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestFP16Scale(TestScale):
......@@ -82,7 +80,7 @@ class TestBiasAfterScale(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestCast1(OpTest):
def setUp(self):
self.set_npu()
......@@ -49,7 +47,7 @@ class TestCast1(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestCast2(OpTest):
......@@ -72,7 +70,7 @@ class TestCast2(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestCast3(OpTest):
......@@ -95,7 +93,7 @@ class TestCast3(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestCast4(OpTest):
......@@ -119,7 +117,7 @@ class TestCast4(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -26,8 +26,6 @@ import paddle.fluid.core as core
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSeedOpFixSeed(OpTest):
def setUp(self):
self.set_npu()
......@@ -43,8 +41,6 @@ class TestSeedOpFixSeed(OpTest):
self.check_output_with_place(paddle.NPUPlace(0))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSeedOpDiffSeed(OpTest):
def setUp(self):
self.set_npu()
......
......@@ -24,8 +24,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSGD(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,11 +48,9 @@ class TestSGD(OpTest):
self.w = 15
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestShape(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestShape(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -27,8 +27,6 @@ SEED = 2021
EPOCH = 100
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSliceOp(OpTest):
def setUp(self):
self.op_type = "slice"
......@@ -60,13 +58,12 @@ class TestSliceOp(OpTest):
self.place = paddle.NPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['Input'], 'Out', check_dygraph=False)
self.check_grad_with_place(self.place, ['Input'], 'Out')
class TestSliceOp2(TestSliceOp):
......@@ -79,8 +76,6 @@ class TestSliceOp2(TestSliceOp):
self.out = self.input[:, 0:1, :]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSliceOpFp16(TestSliceOp):
def init_dtype(self):
self.dtype = np.float16
......@@ -147,8 +142,6 @@ class TestSliceOpTensor2(TestSliceOpTensor):
self.out = self.input[:, 0:1, :]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSliceOpFp16Tensor(TestSliceOpTensor):
def init_dtype(self):
self.dtype = np.float16
......@@ -237,8 +230,6 @@ class TestSliceOpTensorList2(TestSliceOpTensorList):
self.out = self.input[:, 0:1, :]
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSliceOpFp16TensorList(TestSliceOpTensorList):
def init_dtype(self):
self.dtype = np.float16
......@@ -249,8 +240,6 @@ class TestSliceOpFp16TensorList(TestSliceOpTensorList):
self.place = paddle.NPUPlace(0)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSliceNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSoftmax(OpTest):
def setUp(self):
self.set_npu()
......@@ -51,11 +49,9 @@ class TestSoftmax(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSoftmaxNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -28,8 +28,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSoftmaxWithCrossEntropyOp(OpTest):
def set_npu(self):
self.__class__.use_npu = True
......@@ -86,7 +84,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
self.attrs['axis'] = self.axis
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
def test_check_grad(self):
if self.dtype == np.float16:
......@@ -95,13 +93,10 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
self.check_grad_with_place(
self.place, ['Logits'],
'Loss',
check_dygraph=False,
numeric_grad_delta=0.001,
max_relative_error=0.5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestPowNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
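For context on the loss these tests check: a numerically stable softmax along the chosen axis followed by cross entropy against hard labels. A minimal numpy reference for the hard-label case (illustrative):

import numpy as np

def softmax_with_ce_ref(logits, labels, axis=-1):
    shifted = logits - logits.max(axis=axis, keepdims=True)
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=axis, keepdims=True)
    # hard labels: loss is the negative log-probability of the true class
    loss = -np.log(np.take_along_axis(softmax, labels, axis=axis))
    return softmax, loss

logits = np.random.rand(4, 10).astype(np.float32)
labels = np.random.randint(0, 10, size=(4, 1))
softmax, loss = softmax_with_ce_ref(logits, labels)
assert loss.shape == (4, 1)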
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqrt(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestSqrt(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -60,8 +58,6 @@ class TestSqrt(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqrtFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -85,11 +81,9 @@ class TestSqrtFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqrtNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSquare(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestSquare(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -60,8 +58,6 @@ class TestSquare(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSquareFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -85,11 +81,9 @@ class TestSquareFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)
self.check_output_with_place(self.place, atol=1e-5)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSquareNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -26,10 +26,9 @@ from paddle.fluid import Program, program_guard
paddle.enable_static()
# Correct: General.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqueezeOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -58,8 +57,8 @@ class TestSqueezeOp(OpTest):
# Correct: There is a minus (negative) axis.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqueezeOp1(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
......@@ -68,8 +67,8 @@ class TestSqueezeOp1(TestSqueezeOp):
# Correct: No axes input.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqueezeOp2(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
......@@ -78,8 +77,8 @@ class TestSqueezeOp2(TestSqueezeOp):
# Correct: Just part of axes be squeezed.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
......@@ -88,8 +87,8 @@ class TestSqueezeOp3(TestSqueezeOp):
# Correct: The dimension of an axis that is not of size 1 remains unchanged.
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSqueezeOp4(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
......
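To make the squeeze cases above concrete: starting from ori_shape (1, 3, 1, 40), squeezing both size-1 axes, or passing no axes at all, yields (3, 40). numpy follows the same rule, so a quick check (axis values chosen for illustration):

import numpy as np

x = np.ones((1, 3, 1, 40), dtype=np.float32)
print(np.squeeze(x, axis=(0, -2)).shape)  # (3, 40)
print(np.squeeze(x).shape)                # (3, 40): all size-1 axes removed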
......@@ -26,8 +26,6 @@ import paddle.fluid.core as core
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOpBase(OpTest):
def initDefaultParameters(self):
self.num_inputs = 4
......@@ -77,50 +75,36 @@ class TestStackOpBase(OpTest):
self.check_grad_with_place(self.place, self.get_x_names(), 'Y')
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp1(TestStackOpBase):
def initParameters(self):
self.num_inputs = 16
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp2(TestStackOpBase):
def initParameters(self):
self.num_inputs = 20
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp3(TestStackOpBase):
def initParameters(self):
self.axis = -1
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp4(TestStackOpBase):
def initParameters(self):
self.axis = -4
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp5(TestStackOpBase):
def initParameters(self):
self.axis = 1
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp6(TestStackOpBase):
def initParameters(self):
self.axis = 3
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackAPIWithLoDTensorArray(unittest.TestCase):
"""
Test stack api when the input(x) is a LoDTensorArray.
......@@ -157,8 +141,6 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase):
[self.x] * self.iter_num, axis=self.axis)))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
"""
Test stack api when the input(x) is a LoDTensorArray.
......@@ -195,8 +177,6 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
[self.x] * self.iter_num, axis=self.axis)))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class API_test(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
......@@ -223,8 +203,6 @@ class API_test(unittest.TestCase):
self.assertRaises(TypeError, paddle.stack, x)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class API_DygraphTest(unittest.TestCase):
def test_out(self):
data1 = np.array([[1.0, 2.0]])
......
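API_DygraphTest above is truncated by the fold; for reference, a small sketch of the dygraph stack path these tests exercise (place and values are illustrative, not taken from the file):

import numpy as np
import paddle
import paddle.fluid as fluid

data1 = np.array([[1.0, 2.0]])
data2 = np.array([[3.0, 4.0]])
with fluid.dygraph.guard(paddle.NPUPlace(0)):  # assumes an NPU device
    x1 = fluid.dygraph.to_variable(data1)
    x2 = fluid.dygraph.to_variable(data2)
    result = paddle.stack([x1, x2], axis=0)
    print(result.numpy().shape)  # (2, 1, 2)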
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestSum1(OpTest):
def setUp(self):
self.set_npu()
......@@ -52,7 +50,7 @@ class TestSum1(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestSum2(OpTest):
......@@ -86,7 +84,7 @@ class TestSum2(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestSum3(OpTest):
......@@ -111,7 +109,7 @@ class TestSum3(OpTest):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -26,8 +26,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTanh(OpTest):
def setUp(self):
self.set_npu()
......@@ -50,7 +48,7 @@ class TestTanh(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
......@@ -60,8 +58,6 @@ class TestTanh(OpTest):
#
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTanhFp16(OpTest):
def setUp(self):
self.set_npu()
......@@ -85,11 +81,9 @@ class TestTanhFp16(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False, atol=1e-3)
self.check_output_with_place(self.place, atol=1e-3)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTanhNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
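Note that the fp16 variants only drop check_dygraph while keeping per-op absolute tolerances: atol=1e-5 for sqrt and square, atol=1e-3 for tanh. The relaxed bounds reflect fp16's ~10 mantissa bits; a quick check of the error a float16 round trip introduces:

import numpy as np

x = np.float32(0.1)
# small but nonzero, which is why these tests relax atol
print(abs(np.tanh(x) - np.float32(np.tanh(np.float16(x)))))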
......@@ -27,8 +27,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTopk(OpTest):
def setUp(self):
self.set_npu()
......@@ -56,11 +54,9 @@ class TestTopk(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTopkV2(OpTest):
def setUp(self):
self.set_npu()
......@@ -88,7 +84,7 @@ class TestTopkV2(OpTest):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
if __name__ == '__main__':
......
......@@ -25,8 +25,6 @@ import paddle.fluid as fluid
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTransposeOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -58,11 +56,9 @@ class TestTransposeOp(OpTest):
self.axis = -1
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTransposeOpFP16(TestTransposeOp):
no_need_check_grad = True
......
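TestTransposeOpFP16 above switches off gradient checking with a class attribute rather than by overriding the test method. A hypothetical sketch of how such a flag is typically consumed (an assumption about OpTest-style helpers, not code from this PR):

    def test_check_grad(self):
        # honor a class-level opt-out before running the expensive check
        if getattr(self.__class__, 'no_need_check_grad', False):
            return
        self.check_grad_with_place(self.place, ['X'], 'Out')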
......@@ -29,8 +29,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestTruncatedNormal(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
......
......@@ -39,8 +39,6 @@ def output_hist(out):
return hist, prob
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUUniformRandomOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -76,8 +74,6 @@ class TestNPUUniformRandomOp(OpTest):
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
def get_places(self):
places = [core.CPUPlace()]
......
......@@ -24,8 +24,6 @@ import paddle
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestUnStackOpBase(OpTest):
def initDefaultParameters(self):
self.input_dim = (5, 6, 7)
......@@ -75,29 +73,21 @@ class TestUnStackOpBase(OpTest):
self.check_grad_with_place(self.place, ['X'], self.get_y_names())
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp3(TestUnStackOpBase):
def initParameters(self):
self.axis = -1
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp4(TestUnStackOpBase):
def initParameters(self):
self.axis = -3
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp5(TestUnStackOpBase):
def initParameters(self):
self.axis = 1
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestStackOp6(TestUnStackOpBase):
def initParameters(self):
self.axis = 2
......
......@@ -25,8 +25,6 @@ paddle.enable_static()
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestUpdateLossScalingOp(OpTest):
def setUp(self):
self.set_npu()
......@@ -71,7 +69,7 @@ class TestUpdateLossScalingOp(OpTest):
}
def test_check_output(self):
self.check_output_with_place(self.place, check_dygraph=False)
self.check_output_with_place(self.place)
class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
......@@ -103,8 +101,6 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
}
@unittest.skipIf(not paddle.is_compiled_with_npu(),
"core is not compiled with NPU")
class TestUpdateLossScalingLayer(unittest.TestCase):
def loss_scaling_check(self, use_npu=True, scope=fluid.Scope()):
a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
......
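For context on what TestUpdateLossScalingOp exercises: dynamic loss scaling shrinks the scale when inf/nan gradients appear and grows it after a run of clean steps, keeping fp16 gradients representable. A minimal standalone sketch of that rule (constants and exact policy are assumptions, not the operator's defaults):

def update_loss_scaling_sketch(found_inf, scale, good_steps,
                               incr_every_n=1000, incr_ratio=2.0,
                               decr_ratio=0.5):
    if found_inf:
        # overflow: cut the scale and restart the clean-step counter
        return max(scale * decr_ratio, 1.0), 0
    good_steps += 1
    if good_steps == incr_every_n:
        # a full run of clean steps: grow the scale
        return scale * incr_ratio, 0
    return scale, good_steps

print(update_loss_scaling_sketch(True, 32768.0, 5))    # (16384.0, 0)
print(update_loss_scaling_sketch(False, 32768.0, 999)) # (65536.0, 0)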