diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 633a65386c485265a35cec89b5ef5bbcd5a9870b..d203db4710ccf980072d314eb259c1696d3cf628 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -1481,7 +1481,7 @@ class ParameterServerLauncher(object):
             format(self.node_ips, self.current_node_ip, self.node_rank))

     def start_ps(self):
-        if not self.current_node_ip in self.node_ips:
+        if self.current_node_ip not in self.node_ips:
             return
         cluster = Cluster(hdfs=None)
         server_rank = 0
diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
index 941accf029dfa070558dd103552717a320f2fa23..09748dfee5361ed049b65b81d979061eb0f33c9f 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
@@ -255,7 +255,7 @@ class PipelineOptimizer(MetaOptimizerBase):
                     if param_name in processed_param_name: continue
                     processed_param_name.add(param_name)
                     grad_name = op_role_var[i + 1]
-                    if not 'MERGED' in grad_name: grad_name += '@MERGED'
+                    if 'MERGED' not in grad_name: grad_name += '@MERGED'
                     grad = block.vars[grad_name]
                     origin_param = origin_block.vars[op_role_var[i]]
                     if origin_param.is_distributed:
diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
index 04955db3c1811570c0a9c76074a4b61c27730018..5eb770875c96de4e8b6a78f34ecf35775f8c2187 100644
--- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
+++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
@@ -345,7 +345,7 @@ class HybridParallelInferenceHelper(object):
         for each_block in self._main_program.blocks:
             for op in each_block.ops:
                 for var_name in op.input_arg_names:
-                    if not var_name in params or var_name in self._param_device_map:
+                    if var_name not in params or var_name in self._param_device_map:
                         continue

                     device = op.attr(self._op_device_key)
@@ -404,7 +404,7 @@ class HybridParallelInferenceHelper(object):
                 block._remove_op(op_idx)

             for var_name in list(block.vars.keys()):
-                if not var_name in used_var_names:
+                if var_name not in used_var_names:
                     block._remove_var(var_name)

         return used_var_names
diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py
index 3459f7785420a3ad68facb5cae13d85e703cb707..a66712c14cabd188f0f2a06bc88c35073594902f 100755
--- a/python/paddle/distributed/ps/utils/public.py
+++ b/python/paddle/distributed/ps/utils/public.py
@@ -784,7 +784,7 @@ def union_forward_gradient_op(program_block_ops_list):
     block_op_list = {"forward": [], "backward": []}

     for op in program_block_ops_list[block_length // 2]:
-        if not "_grad" in op.type and not (op.type == "sum"):
+        if "_grad" not in op.type and not (op.type == "sum"):
             block_op_list["forward"].append(op)
         else:
             block_op_list["backward"].append(op)
@@ -901,7 +901,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
             if var not in previous_block_private and var not in previous_block_entrance:
                 previous_block_entrance.append(var)
                 previous_block_exit.append(var)
-                if not var in current_block_entrance:
+                if var not in current_block_entrance:
                     current_block_entrance.append(var)

     for index in range(0, len(block_var_detail) - 1, 1):
@@ -918,7 +918,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
         need_add_vars = list(set(current_block_entrance) - set(exist_vars))
         need_ignore_vars = []
         for var in need_add_vars:
-            if not "@GRAD" in var:
+            if "@GRAD" not in var:
                 need_ignore_vars.append(var)
         need_add_vars = list(
             set(need_add_vars).difference(set(need_ignore_vars)))
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
index adfd0613bd3f46ccf6ec4176ed303414068b1b80..d3179373ff7d63a481ded9b4889491ab1abf9502 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
@@ -31,9 +31,9 @@ class TestClipOneDNNOp(OpTest):
         self.adjust_op_settings()

         self.min = self.attrs[
-            'min'] if not 'Min' in self.inputs else self.inputs['Min']
+            'min'] if 'Min' not in self.inputs else self.inputs['Min']
         self.max = self.attrs[
-            'max'] if not 'Max' in self.inputs else self.inputs['Max']
+            'max'] if 'Max' not in self.inputs else self.inputs['Max']

         self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)}
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index f458f28d267d947718e5ebfbc9a608c3b8fa0f56..2aa848d13858f31f03504f94dbe6618d41c93aa0 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -1173,7 +1173,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py
index 3b2bb42d57c2c2039528a2cb58a63b93a75eeb0b..deb7b606c8296cd1029e21f71bdf4078ab791510 100644
--- a/python/paddle/fluid/tests/unittests/test_exponential_op.py
+++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py
@@ -102,7 +102,7 @@ class TestExponentialAPI(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 7314841ca8b75317bbcd8cb29cee429899da1092..331aa0f20fabb8f8def06c18b56c7e1168aff00f 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -353,7 +353,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         def _check_random_value(dtype, expect, expect_mean, expect_std):
diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
index 0e73e3c5efec4d8ffa4a8e1bc4dfe16efd86caaa..71e6cca29070e7feb81548631e8db861e10d5e00 100644
--- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
@@ -250,7 +250,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py
index dfa15f98d9f9920054d5b4a634118ea62042d7a3..a86f042b614f91a74b67aab19c220dde47c67fd7 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -205,7 +205,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index eb72beed2c8ed1aa4fd7897f9cd2e06761e904fc..285e4f4f20b5c8d14952b990c80f5485c6bc7bae 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -606,7 +606,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py
index 1903c30824e06f44adc7a6bcfe5145742d79f9a1..1ea3aa7cfa70a6e639d61f3e489c0ba610c0de90 100644
--- a/python/paddle/tensor/einsum.py
+++ b/python/paddle/tensor/einsum.py
@@ -411,7 +411,7 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
             plan.add_step(step)
             step = squeeze, [var2], var2, [-1, -2]
             plan.add_step(step)
-        elif j1 + j2 == 0 and not -1 in np.concatenate(
+        elif j1 + j2 == 0 and -1 not in np.concatenate(
             (op1_vshape[K], op2_vshape[K])):
             assert all(op1_vshape[K] == op2_vshape[K])
             step = reshape, [
diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py
index f768bdf2ae46fd99be01fc4acc0ea2885706e1ee..4cb6262cac38abd1384745ded13dd04c5fe3b286 100644
--- a/tools/count_api_without_core_ops.py
+++ b/tools/count_api_without_core_ops.py
@@ -166,7 +166,7 @@ def visit_all_module(mod, func):
                     IdSet.add(instance_id)
                     visit_member(mod.__name__, instance, func)
             except:
-                if not cur_name in ErrorSet and not cur_name in skiplist:
+                if cur_name not in ErrorSet and cur_name not in skiplist:
                     ErrorSet.add(cur_name)
diff --git a/tools/infrt/get_compat_kernel_signature.py b/tools/infrt/get_compat_kernel_signature.py
index 9e112cafc8514980ad5b73c100cb42cada82744f..1ac638aea89225a4d5e153476a1a18d8611669a1 100644
--- a/tools/infrt/get_compat_kernel_signature.py
+++ b/tools/infrt/get_compat_kernel_signature.py
@@ -38,7 +38,7 @@ def parse_compat_registry(kernel_info):
 def remove_grad_registry(kernels_registry):
     clean_kernel_registry = {}
     for registry in kernels_registry:
-        if (not "_grad" in registry):
+        if "_grad" not in registry:
             clean_kernel_registry[registry] = kernels_registry[registry]
     return clean_kernel_registry

@@ -47,7 +47,7 @@ def get_compat_kernels_info():
     kernels_info = {}
     compat_files = os.listdir("../../paddle/phi/ops/compat")
     for file_ in compat_files:
-        if not ".cc" in file_:
+        if ".cc" not in file_:
             compat_files.remove(file_)

     for file_ in compat_files:
diff --git a/tools/infrt/print_kernel_pass_info.py b/tools/infrt/print_kernel_pass_info.py
index ef9b0b59f37ce6749c5a7d49686b07d034df474b..d3673e9c907f04140b4f3f5743b5133de6ffc570 100644
--- a/tools/infrt/print_kernel_pass_info.py
+++ b/tools/infrt/print_kernel_pass_info.py
@@ -22,7 +22,7 @@ skip_list = []
 def remove_grad_kernel(kernels):
     clean_kernels = []
     for kernel_ in kernels:
-        if (not "_grad" in kernel_):
+        if "_grad" not in kernel_:
             clean_kernels.append(kernel_)
     return clean_kernels

@@ -37,7 +37,7 @@ def get_compat_kernels_info(register):
     kernel_names = []
     for dirpath, dirnames, filenames in os.walk("../../paddle/fluid/operators"):
         for file_name in filenames:
-            if not ".cc" in file_name:
+            if ".cc" not in file_name:
                 continue
             with open(os.path.join(dirpath, file_name)) as f:
                 txt = f.readlines()
diff --git a/tools/print_signatures.py b/tools/print_signatures.py
index 5a6ad44e45d56c0e67471c1719fdc7b98a210379..93f5815bc906980bdb1992bec70c800bd2ff349a 100644
--- a/tools/print_signatures.py
+++ b/tools/print_signatures.py
@@ -113,7 +113,7 @@ def visit_all_module(mod):
                         .format(member_name, instance.__name__),
                         file=sys.stderr)
             except:
-                if not cur_name in ErrorSet and not cur_name in skiplist:
+                if cur_name not in ErrorSet and cur_name not in skiplist:
                     ErrorSet.add(cur_name)