diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index 19a24488f9442943d361fda9f00624e2d6c1a071..b7070a7237896ad0c55e379bd30edffd66beb3d2 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -76,7 +76,7 @@ def _is_cpuonly(backend):
     if (backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl']
             and (core.is_compiled_with_cuda() or core.is_compiled_with_xpu()
                  or core.is_compiled_with_npu()
-                 or core.is_compiled_with_mlu())) or backend is 'xccl':
+                 or core.is_compiled_with_mlu())) or backend == 'xccl':
 
         # passes 'auto' and can use cuda or xpu, use the default logics. so return False
         return False
diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py
index 18f3252e6e1454fea3217a65f8437557e1d74640..762a16b61ff829fa714783c07feee9bf7898d24a 100644
--- a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py
+++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py
@@ -293,7 +293,7 @@ class TestStaticModel(unittest.TestCase):
             # For PE
             if use_pe:
                 places = paddle.static.cpu_places(
-                ) if device is 'cpu' else paddle.static.cuda_places()
+                ) if device == 'cpu' else paddle.static.cuda_places()
                 main_program = paddle.static.CompiledProgram(
                     paddle.static.default_main_program(
                     )).with_data_parallel(loss_name=loss.name,
diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py
index 14c8bb0c6f671eaf01d927e7a2492375dc30b1ab..32e34091d38a6999a7768bd83e03946f4d3d39fb 100644
--- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py
+++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py
@@ -72,7 +72,7 @@ def custom_relu_static_pe(func, device, dtype, np_x, use_func=True):
     paddle.enable_static()
     paddle.set_device(device)
 
-    places = static.cpu_places() if device is 'cpu' else static.cuda_places()
+    places = static.cpu_places() if device == 'cpu' else static.cuda_places()
     with static.scope_guard(static.Scope()):
         with static.program_guard(static.Program()):
             x = static.data(name='X', shape=[None, 8], dtype=dtype)
diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
index 6bb5490f8ce332b3516e1595f8c4c93af0b73755..b8bbe582cbecccedd8e5a905e95a1b66a486a0d6 100644
--- a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
+++ b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py
@@ -278,7 +278,7 @@ class TransformerDecoder(nn.Layer):
         self.num_layers = num_layers
         self.layers = decoder_layers
         self.norm = norm
-        if norm is "LayerNorm":
+        if norm == "LayerNorm":
             self.norm = nn.LayerNorm(hidden_size)
         elif norm is not None:
             raise ValueError("Only support LayerNorm")
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
index 258bab668bdd930a07e0233189686d6f008352b4..5bb31b92cfc994ba399aa67dbcb51a9bf4495e0e 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py
@@ -266,7 +266,7 @@ class TransformerDecoder(nn.Layer):
         self.num_layers = num_layers
         self.layers = decoder_layers
         self.norm = norm
-        if norm is "LayerNorm":
+        if norm == "LayerNorm":
             self.norm = nn.LayerNorm(hidden_size)
         elif norm is not None:
             raise ValueError("Only support LayerNorm")
ValueError("Only support LayerNorm") diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py index a31be0a30723860c2866486f6248fe0e7c6dd5af..1999a9c6ffd99d67b45b3ba9104af6d313c9620a 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py @@ -313,7 +313,7 @@ class TransformerDecoder(nn.Layer): self.num_layers = num_layers self.layers = decoder_layers self.norm = norm - if norm is "LayerNorm": + if norm == "LayerNorm": self.norm = nn.LayerNorm(hidden_size) elif norm is not None: raise ValueError("Only support LayerNorm") diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py index 2843704b1b2936df2e155e0d542b17f46ccd8ee6..8c6b3421e24a995e212ab0b107f77a1d1d259d5f 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py @@ -26,7 +26,7 @@ from paddle import enable_static def _lookup(weights, ids, flat_ids, op_version="lookup_table"): w_shape = weights.shape - out_shape = list(ids.shape[:-1]) if op_version is "lookup_table" else list( + out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list( ids.shape) out_shape.append(w_shape[-1]) out = weights[flat_ids].reshape(out_shape) @@ -36,7 +36,7 @@ def _lookup(weights, ids, flat_ids, op_version="lookup_table"): def _get_grad(weights, ids, flat_ids, op_version="lookup_table"): w_shape = weights.shape w_grad = np.zeros((w_shape), dtype=weights.dtype) - out_shape = list(ids.shape[:-1]) if op_version is "lookup_table" else list( + out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list( ids.shape) out_grad_shape = (np.prod(out_shape), w_shape[-1]) out_grad = weights[flat_ids].reshape(out_grad_shape) diff --git a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py index 1fe81d8a97d10c2d0dbe5dc5420c27d4134bcdf3..f7509ba1c8b09034e3500e52d5c09ef949a5f293 100644 --- a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py @@ -44,13 +44,13 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): def cal_np_out_and_gradient(self): def _cal_np_out_and_gradient(func): - if func is 'amax': + if func == 'amax': out = np.amax(self.x_np, axis=self.axis, keepdims=self.keepdim) - elif func is 'amin': + elif func == 'amin': out = np.amin(self.x_np, axis=self.axis, keepdims=self.keepdim) - elif func is 'max': + elif func == 'max': out = np.max(self.x_np, axis=self.axis, keepdims=self.keepdim) - elif func is 'min': + elif func == 'min': out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim) else: print('This unittest only test amax/amin/max/min, but now is', @@ -74,13 +74,13 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): _cal_np_out_and_gradient('min') def _choose_paddle_func(self, func, x): - if func is 'amax': + if func == 'amax': out = paddle.amax(x, self.axis, self.keepdim) - elif func is 'amin': + elif func == 'amin': out = paddle.amin(x, self.axis, self.keepdim) - elif func is 'max': + elif func == 'max': out = paddle.max(x, self.axis, self.keepdim) - elif func is 'min': + elif func == 'min': out = paddle.min(x, self.axis, self.keepdim) else: print('This unittest only test 
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
index b2445b6ec8a0b3d28b827f95bab8b1fc4de22646..3695de024f4c65e6387d53ec68df31f6bc39486e 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
@@ -100,7 +100,7 @@ class TestFetchAndFeed(unittest.TestCase):
 
         for k, v in all_vars.items():
             if ('tmp' not in k) and (
-                    k[0] is not '_' or v.persistable
+                    k[0] != '_' or v.persistable
             ) and v.type == core.VarDesc.VarType.LOD_TENSOR:
                 fetch_list.append(k)
diff --git a/python/paddle/fluid/tests/unittests/test_real_imag_op.py b/python/paddle/fluid/tests/unittests/test_real_imag_op.py
index c1d3764f51ace78530398775504d55648f044dc6..8b0a0ad635b1add82c4c05f3d6c2824170bd2599 100644
--- a/python/paddle/fluid/tests/unittests/test_real_imag_op.py
+++ b/python/paddle/fluid/tests/unittests/test_real_imag_op.py
@@ -131,7 +131,7 @@ class TestRealAPI(unittest.TestCase):
             res = paddle_apis[self.api](input_t).numpy()
             np.testing.assert_array_equal(np_res, res)
             res_t = input_t.real().numpy(
-            ) if self.api is "real" else input_t.imag().numpy()
+            ) if self.api == "real" else input_t.imag().numpy()
             np.testing.assert_array_equal(np_res, res_t)
 
     def test_name_argument(self):
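
Every hunk above makes the same mechanical change: identity comparison against a string literal (`is` / `is not`) becomes value comparison (`==` / `!=`). `x is 'cpu'` asks whether `x` and the literal are the very same object, so it only "works" when CPython happens to intern both strings, and CPython 3.8+ flags such comparisons with `SyntaxWarning: "is" with a literal`. A minimal standalone sketch of the pitfall (illustration only, not part of the patch):

# A string with equal value but a distinct identity: strings built at
# runtime (e.g. via join) are not automatically interned by CPython.
device = ''.join(['c', 'pu'])

print(device == 'cpu')  # True  -- compares values
print(device is 'cpu')  # False -- compares object identity; CPython 3.8+
                        # also emits SyntaxWarning: "is" with a literal

# Single-character strings are cached by CPython, so the old
# `k[0] is not '_'` happened to behave like `!=` -- but that is an
# implementation detail, not a language guarantee.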