Unverified · Commit d00c2ca6 · Authored by Nyakku Shigure, committed by GitHub

[CodeStyle][UP005] replace deprecated unittest aliases (#49522)

Parent 419c2d14
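For context: the camelCase assertion methods being removed here are the pre-Python-3.2 names, kept in `unittest` only as deprecated aliases; they emit `DeprecationWarning` and are removed in Python 3.12, which is why the suite is migrated to the surviving names. Below is a minimal sketch of the rename this commit applies mechanically across the tests; the `AliasExample` class is illustrative, not taken from the Paddle code base:

```python
import unittest

# Deprecated alias       -> surviving name
#   assertEquals         -> assertEqual
#   assertNotEquals      -> assertNotEqual
#   assertAlmostEquals   -> assertAlmostEqual
#   assertRaisesRegexp   -> assertRaisesRegex
#   assertRegexpMatches  -> assertRegex


class AliasExample(unittest.TestCase):
    def test_raises_regex(self):
        # Before: with self.assertRaisesRegexp(ValueError, "invalid literal"):
        # The alias warns under Python 3 and is gone in 3.12.
        with self.assertRaisesRegex(ValueError, "invalid literal"):
            int("not a number")

    def test_equality(self):
        self.assertEqual(1 + 1, 2)                          # was assertEquals
        self.assertNotEqual("a", "b")                       # was assertNotEquals
        self.assertAlmostEqual(0.1 + 0.2, 0.3, delta=1e-9)  # was assertAlmostEquals


if __name__ == '__main__':
    unittest.main()
```

UP005 in the commit title is the pyupgrade/Ruff rule ID for exactly this rewrite; assuming Ruff is configured for the repo, something like `ruff check --select UP005 --fix .` should reproduce the change tree-wide.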
@@ -142,7 +142,7 @@ class TestCheckCompiler(TestABIBase):
 class TestRunCMDException(unittest.TestCase):
     def test_exception(self):
         for verbose in [True, False]:
-            with self.assertRaisesRegexp(RuntimeError, "Failed to run command"):
+            with self.assertRaisesRegex(RuntimeError, "Failed to run command"):
                 cmd = "fake cmd"
                 utils.run_cmd(cmd, verbose)
...
@@ -396,17 +396,17 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase):
         paddle.enable_static()
         net = SimpleNet()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "only available in dynamic mode"
         ):
             net.forward.concrete_program
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "only available in dynamic mode"
         ):
             net.forward.inputs
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "only available in dynamic mode"
         ):
             net.forward.outputs
...
@@ -357,12 +357,12 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase):
         net = Net()
         self.program_translator.enable(True)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "only available in dynamic mode"
         ):
             self.program_translator.get_output(net.forward, self.x)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "only available in dynamic mode"
         ):
             self.program_translator.get_program(net.forward, self.x)
...
@@ -97,7 +97,7 @@ class TestArgsSpecName(unittest.TestCase):
             return name_ids[name]

         mode = [to_idx(name) for name in in_names]
-        self.assertEquals(mode, expect_mode)
+        self.assertEqual(mode, expect_mode)


 if __name__ == '__main__':
...
@@ -140,12 +140,12 @@ class TestIrMemOptBase(BuildIrMemOptBase):
             self.network
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(baseline_last_loss),
             np.mean(cur_last_loss),
             delta=1e-6,
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(baseline_first_loss),
             np.mean(cur_first_loss),
             delta=1e-6,
...
@@ -82,7 +82,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
         paddle.enable_static()
         data = fluid.data(shape=Xshape, dtype='float64', name=cls_name)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             eval(expected.split(':')[-1]), errmsg[expected]
         ):
             getattr(tensor, op_type)(x=data, diagonal=diagonal)
...
@@ -83,7 +83,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
         paddle.enable_static()
         data = fluid.data(shape=Xshape, dtype='float32', name=cls_name)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             eval(expected.split(':')[-1]), errmsg[expected]
         ):
             getattr(tensor, op_type)(x=data, diagonal=diagonal)
...
@@ -50,16 +50,16 @@ class TestResnetBase(TestParallelExecutorBase):
         if compare_separately:
             for loss in zip(func_1_first_loss, func_2_first_loss):
-                self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
+                self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
             for loss in zip(func_1_last_loss, func_2_last_loss):
-                self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
+                self.assertAlmostEqual(loss[0], loss[1], delta=delta2)
         else:
             np.testing.assert_allclose(
                 func_1_loss_area, func_2_loss_area, rtol=delta2
             )
-            self.assertAlmostEquals(
+            self.assertAlmostEqual(
                 np.mean(func_1_first_loss), func_2_first_loss[0], delta=1e-5
             )
-            self.assertAlmostEquals(
+            self.assertAlmostEqual(
                 np.mean(func_1_last_loss), func_2_last_loss[0], delta=delta2
             )
@@ -177,12 +177,12 @@ class TestBuffer(unittest.TestCase):
         net = fluid.Layer()
         var = to_variable(np.zeros([1]))
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             TypeError, "name of buffer should be a string"
         ):
             net.register_buffer(12, var)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             TypeError, "buffer should be a Paddle.Tensor"
         ):
             if in_dygraph_mode():
@@ -194,18 +194,18 @@ class TestBuffer(unittest.TestCase):
                     "buffer_name", ParamBase([2, 2], 'float32')
                 )
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             KeyError, "name of buffer can not contain"
         ):
             net.register_buffer("buffer.name", var)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             KeyError, "name of buffer can not be empty"
         ):
             net.register_buffer("", var)

         net.attr_name = 10
-        with self.assertRaisesRegexp(KeyError, "already exists"):
+        with self.assertRaisesRegex(KeyError, "already exists"):
             net.register_buffer("attr_name", var)

         del net.attr_name
@@ -213,7 +213,7 @@ class TestBuffer(unittest.TestCase):
             net.attr_name = EagerParamBase([2, 2], 'float32')
         else:
             net.attr_name = ParamBase([2, 2], 'float32')
-        with self.assertRaisesRegexp(KeyError, "already exists"):
+        with self.assertRaisesRegex(KeyError, "already exists"):
             net.register_buffer("attr_name", var)

     def test_register_buffer_same_name(self):
...
@@ -21,7 +21,7 @@ class CipherUtilsTestCase(unittest.TestCase):
     def test_gen_key(self):
         key1 = CipherUtils.gen_key(256)
         key2 = CipherUtils.gen_key_to_file(256, "paddle_aes_test.keyfile")
-        self.assertNotEquals(key1, key2)
+        self.assertNotEqual(key1, key2)
         key3 = CipherUtils.read_key_from_file("paddle_aes_test.keyfile")
         self.assertEqual(key2, key3)
         self.assertEqual(len(key1), 32)
...
@@ -28,11 +28,11 @@ class TestCifar10(unittest.TestCase):
         read_num = 0
         for data in cyclic_reader():
             read_num += 1
-            self.assertEquals(len(data), 2)
+            self.assertEqual(len(data), 2)

             if read_num == sample_num * 2:
                 break

-        self.assertEquals(read_num, sample_num * 2)
+        self.assertEqual(read_num, sample_num * 2)


 if __name__ == '__main__':
...
@@ -153,7 +153,7 @@ class DatasetLoaderTestBase(unittest.TestCase):
         for _ in range(EPOCH_NUM):
             has_complete_batch = False
             for batch_id, data in enumerate(dataloader):
-                self.assertEquals(len(places), len(data))
+                self.assertEqual(len(places), len(data))
                 for idx, data_on_each_device in enumerate(data):
                     image = data_on_each_device["image"]
                     label = data_on_each_device["label"]
@@ -166,7 +166,7 @@ class DatasetLoaderTestBase(unittest.TestCase):
                     else:
                         batch_size = BATCH_SIZE
-                    self.assertEquals(image.shape()[1:], IMAGE_SHAPE)
+                    self.assertEqual(image.shape()[1:], IMAGE_SHAPE)
                     self.assertTrue(
                         image._place()._equals(places[idx]),
                         msg=get_place_string(image._place())
@@ -174,24 +174,24 @@ class DatasetLoaderTestBase(unittest.TestCase):
                         + get_place_string(places[idx]),
                     )
                     if self.drop_last:
-                        self.assertEquals(image.shape()[0], BATCH_SIZE)
+                        self.assertEqual(image.shape()[0], BATCH_SIZE)
                     else:
                         self.assertTrue(
                             image.shape()[0] == BATCH_SIZE
                             or image.shape()[0] == BATCH_SIZE / 2
                         )
-                    self.assertEquals(label.shape()[1:], LABEL_SHAPE)
+                    self.assertEqual(label.shape()[1:], LABEL_SHAPE)
                     self.assertTrue(label._place()._equals(places[idx]))
                     if self.drop_last:
-                        self.assertEquals(label.shape()[0], BATCH_SIZE)
+                        self.assertEqual(label.shape()[0], BATCH_SIZE)
                     else:
                         self.assertTrue(
                             label.shape()[0] == BATCH_SIZE
                             or label.shape()[0] == BATCH_SIZE / 2
                         )
-                    self.assertEquals(image.shape()[0], label.shape()[0])
+                    self.assertEqual(image.shape()[0], label.shape()[0])
                     if image.shape()[0] == BATCH_SIZE:
                         has_complete_batch = True
...
@@ -39,21 +39,21 @@ class DeprecatedMemoryOptimizationInterfaceTest(unittest.TestCase):
     def assert_program_equal(self, prog1, prog2):
         block_num = prog1.num_blocks
-        self.assertEquals(block_num, prog2.num_blocks)
+        self.assertEqual(block_num, prog2.num_blocks)
         for block_id in range(block_num):
             block1 = prog1.block(block_id)
             block2 = prog2.block(block_id)
-            self.assertEquals(len(block1.ops), len(block2.ops))
+            self.assertEqual(len(block1.ops), len(block2.ops))
             for op1, op2 in zip(block1.ops, block2.ops):
-                self.assertEquals(op1.input_arg_names, op2.input_arg_names)
-                self.assertEquals(op1.output_arg_names, op2.output_arg_names)
-            self.assertEquals(len(block1.vars), len(block2.vars))
+                self.assertEqual(op1.input_arg_names, op2.input_arg_names)
+                self.assertEqual(op1.output_arg_names, op2.output_arg_names)
+            self.assertEqual(len(block1.vars), len(block2.vars))
             for var1 in block1.vars.values():
                 self.assertTrue(var1.name in block2.vars)
                 var2 = block2.vars.get(var1.name)
-                self.assertEquals(var1.name, var2.name)
+                self.assertEqual(var1.name, var2.name)

     def test_main(self):
         prog1 = self.build_network(False)
...
@@ -210,7 +210,7 @@ class TestInplace(unittest.TestCase):
         var_d = var_b**2
         loss = paddle.nn.functional.relu(var_c + var_d)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError,
             "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                 1, 0
...
@@ -103,8 +103,8 @@ class TestTreeIndex(unittest.TestCase):
             node.id() for node in tree.get_nodes(travel_path_codes)
         ]
-        self.assertEquals(travel_path_ids + [travel_ids[-1]], travel_ids)
-        self.assertEquals(travel_path_codes + [travel_codes[-1]], travel_codes)
+        self.assertEqual(travel_path_ids + [travel_ids[-1]], travel_ids)
+        self.assertEqual(travel_path_codes + [travel_codes[-1]], travel_codes)

         # get_children
         children_codes = tree.get_children_codes(travel_codes[1], height - 1)
...
@@ -75,12 +75,12 @@ class EagerScaleTestCase(unittest.TestCase):
         out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
         self.assertIsNone(data_eager.grad)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             AssertionError, "The type of grad_tensor must be paddle.Tensor"
         ):
             out_eager.backward(grad_data, False)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             AssertionError,
             "Tensor shape not match, Tensor of grad_tensor /*",
         ):
@@ -265,17 +265,17 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         zero_dim_param = EagerParamBase(shape=[], dtype="float32")
         self.assertEqual(zero_dim_param.shape, [])
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "The shape of Parameter should not be None"
         ):
             eager_param = EagerParamBase(shape=None, dtype="float32")
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "The dtype of Parameter should not be None"
         ):
             eager_param = EagerParamBase(shape=[1, 1], dtype=None)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "Each dimension of shape for Parameter must be greater than 0, but received /*",
         ):
@@ -285,7 +285,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(eager_param.trainable)
         eager_param.trainable = False
         self.assertFalse(eager_param.trainable)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "The type of trainable MUST be bool, but the type is /*"
         ):
             eager_param.trainable = "False"
@@ -296,7 +296,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(eager_param_2.trainable)
         eager_param_2.trainable = False
         self.assertFalse(eager_param_2.trainable)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "The type of trainable MUST be bool, but the type is /*"
         ):
             eager_param_2.trainable = "False"
...
@@ -79,9 +79,9 @@ class TestFuseAllReduceOpsBase(TestParallelExecutorBase):
         )
         for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
         for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)

     def optimizer(self, learning_rate=1e-3):
         optimizer = fluid.optimizer.SGD(
...
@@ -75,9 +75,9 @@ class TestMNIST(TestParallelExecutorBase):
         )
         for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
         for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)

     def test_simple_fc_with_fuse_op(self):
         self._compare_fuse_elewise_add_act_ops(simple_fc_net, DeviceType.CUDA)
...
@@ -71,9 +71,9 @@ class TestFuseOptimizationOps(TestParallelExecutorBase):
         )
         for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
         for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)

     def _decorate_compare_fused_optimizer_ops(
         self, model, use_device, optimizer
...
@@ -119,9 +119,9 @@ class TestMNIST(TestParallelExecutorBase):
         )
         for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
         for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)

     def test_simple_depthwise_with_fuse_op(self):
         self._compare(simple_depthwise_net, DeviceType.CUDA)
...
@@ -55,7 +55,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase):
             self.assertFalse(name in g)
             self.assertFalse(name in g.keys())
             self.assertIsNone(g.get(name, None))
-            self.assertEquals(g.get(name, -1), -1)
+            self.assertEqual(g.get(name, -1), -1)


 if __name__ == '__main__':
...
@@ -37,7 +37,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
     def test_not_capacity(self):
         with fluid.dygraph.guard():
-            with self.assertRaisesRegexp(
+            with self.assertRaisesRegex(
                 ValueError, "Please give value to capacity."
             ):
                 fluid.io.DataLoader.from_generator()
...
@@ -53,7 +53,7 @@ class TestInplace(unittest.TestCase):
         var_d = var_b**2
         loss = paddle.nn.functional.relu(var_c + var_d)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError,
             "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                 1, 0
@@ -161,7 +161,7 @@ class TestDygraphInplace(unittest.TestCase):
         self.inplace_api_processing(var_b)

         loss = paddle.nn.functional.relu(var_c)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError,
             "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                 1, 0
...
@@ -76,7 +76,7 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
         exe = fluid.Executor(fluid.CPUPlace())
         exe.run(start_prog)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "not involved in the target_vars calculation"
         ):
             fluid.io.save_inference_model(
...
@@ -251,7 +251,7 @@ class TestOptimizer(unittest.TestCase):
 )
 class TestSGDOptimizer(TestOptimizer):
     def test_optimizer_multiblock_except(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "var param_y not in this block"
         ):
             self._check_grads(use_bf16=True)
...
@@ -174,12 +174,12 @@ class TestMNIST(TestParallelExecutorBase):
             use_parallel_executor=True,
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(parallel_first_loss),
             single_first_loss,
             delta=1e-6,
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(parallel_last_loss), single_last_loss, delta=1e-6
         )
...
@@ -70,12 +70,12 @@ class TestMNIST(TestParallelExecutorBase):
             use_parallel_executor=True,
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(parallel_first_loss),
             single_first_loss,
             delta=1e-6,
         )
-        self.assertAlmostEquals(
+        self.assertAlmostEqual(
             np.mean(parallel_last_loss), single_last_loss, delta=1e-6
         )
...
@@ -49,9 +49,9 @@ class TestResnetWithReduceBase(TestParallelExecutorBase):
         )
         for loss in zip(all_reduce_first_loss, reduce_first_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
         for loss in zip(all_reduce_last_loss, reduce_last_loss):
-            self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
+            self.assertAlmostEqual(loss[0], loss[1], delta=loss[0] * delta2)

         if not use_device:
             return
@@ -87,19 +87,19 @@ class TestResnetWithReduceBase(TestParallelExecutorBase):
         )
         for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
         for loss in zip(all_reduce_last_loss, all_reduce_last_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
+            self.assertAlmostEqual(loss[0], loss[1], delta=loss[0] * delta2)
         for loss in zip(reduce_first_loss, reduce_first_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
         for loss in zip(reduce_last_loss, reduce_last_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
+            self.assertAlmostEqual(loss[0], loss[1], delta=loss[0] * delta2)
         for loss in zip(all_reduce_first_loss_seq, reduce_first_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=1e-5)
+            self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
         for loss in zip(all_reduce_last_loss_seq, reduce_last_loss_seq):
-            self.assertAlmostEquals(loss[0], loss[1], delta=loss[0] * delta2)
+            self.assertAlmostEqual(loss[0], loss[1], delta=loss[0] * delta2)


 class TestResnetWithReduceCPU(TestResnetWithReduceBase):
...
@@ -440,7 +440,7 @@ class TestPyLayer(unittest.TestCase):
         data.stop_gradient = False
         layer = Layer()
         z = layer(data)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError,
             "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                 1, 0
...
@@ -52,7 +52,7 @@ class TestScope(unittest.TestCase):
         scope = paddle_c.Scope()
         # Delete the scope.
         scope._remove_from_pool()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             Exception, "Deleting a nonexistent scope is not allowed*"
         ):
             # It is not allowed to delete a nonexistent scope.
...
@@ -899,7 +899,7 @@ class TestSetValueValueShape5(TestSetValueApi):
 # 4. Test error
 class TestError(TestSetValueBase):
     def _value_type_error(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             TypeError,
             "Only support to assign an integer, float, numpy.ndarray or paddle.Tensor",
         ):
@@ -908,7 +908,7 @@ class TestError(TestSetValueBase):
             x[0] = value

     def _dtype_error(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             TypeError,
             "When assign a numpy.ndarray, integer or float to a paddle.Tensor, ",
         ):
@@ -916,17 +916,17 @@ class TestError(TestSetValueBase):
             y[0] = 1

     def _step_error(self):
-        with self.assertRaisesRegexp(ValueError, "step can not be 0"):
+        with self.assertRaisesRegex(ValueError, "step can not be 0"):
             x = paddle.ones(shape=self.shape, dtype=self.dtype)
             x[0:1:0] = self.value

     def _ellipsis_error(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             IndexError, "An index can only have a single ellipsis"
         ):
             x = paddle.ones(shape=self.shape, dtype=self.dtype)
             x[..., ...] = self.value
-        with self.assertRaisesRegexp(ValueError, "the start or end is None"):
+        with self.assertRaisesRegex(ValueError, "the start or end is None"):
             x = paddle.ones(shape=self.shape, dtype=self.dtype)
             one = paddle.ones([1])
             x[::one] = self.value
...
@@ -79,7 +79,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
         paddle.enable_static()
         data = fluid.data(shape=Xshape, dtype='float64', name=cls_name)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             eval(expected.split(':')[-1]), errmsg[expected]
         ):
             getattr(tensor, op_type)(x=data, diagonal=diagonal)
...
@@ -78,7 +78,7 @@ class TestDygraphViewReuseAllocation(unittest.TestCase):
         view_var_b[0] = 2.0  # var_b is modified inplace
         loss = paddle.nn.functional.relu(var_c)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError,
             "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                 1, 0
...