From 7ad6d9ea48ad213c696ef9bc3866577dbdcc2598 Mon Sep 17 00:00:00 2001
From: Tony Cao <57024921+caolonghao@users.noreply.github.com>
Date: Tue, 11 Oct 2022 11:32:23 +0800
Subject: [PATCH] [CodeStyle][E713] Convert 'not ... in ' into 'not in' (#46734)

* Update README.md

* Update README.md

* Fix E713: convert 'not ... in' to 'not in'
---
 python/paddle/distributed/fleet/launch_utils.py | 2 +-
 .../distributed/fleet/meta_optimizers/pipeline_optimizer.py | 2 +-
 .../distributed/fleet/utils/hybrid_parallel_inference.py | 4 ++--
 python/paddle/distributed/ps/utils/public.py | 6 +++---
 .../fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py | 4 ++--
 python/paddle/fluid/tests/unittests/test_dropout_op.py | 2 +-
 python/paddle/fluid/tests/unittests/test_exponential_op.py | 2 +-
 .../paddle/fluid/tests/unittests/test_gaussian_random_op.py | 2 +-
 python/paddle/fluid/tests/unittests/test_multinomial_op.py | 2 +-
 python/paddle/fluid/tests/unittests/test_randint_op.py | 2 +-
 .../paddle/fluid/tests/unittests/test_uniform_random_op.py | 2 +-
 python/paddle/tensor/einsum.py | 2 +-
 tools/count_api_without_core_ops.py | 2 +-
 tools/infrt/get_compat_kernel_signature.py | 4 ++--
 tools/infrt/print_kernel_pass_info.py | 4 ++--
 tools/print_signatures.py | 2 +-
 16 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 633a65386c..d203db4710 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -1481,7 +1481,7 @@ class ParameterServerLauncher(object):
                 format(self.node_ips, self.current_node_ip, self.node_rank))

     def start_ps(self):
-        if not self.current_node_ip in self.node_ips:
+        if self.current_node_ip not in self.node_ips:
             return
         cluster = Cluster(hdfs=None)
         server_rank = 0
diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
index 941accf029..09748dfee5 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py
@@ -255,7 +255,7 @@ class PipelineOptimizer(MetaOptimizerBase):
                 if param_name in processed_param_name: continue
                 processed_param_name.add(param_name)
                 grad_name = op_role_var[i + 1]
-                if not 'MERGED' in grad_name: grad_name += '@MERGED'
+                if 'MERGED' not in grad_name: grad_name += '@MERGED'
                 grad = block.vars[grad_name]
                 origin_param = origin_block.vars[op_role_var[i]]
                 if origin_param.is_distributed:
diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
index 04955db3c1..5eb770875c 100644
--- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
+++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
@@ -345,7 +345,7 @@ class HybridParallelInferenceHelper(object):
         for each_block in self._main_program.blocks:
             for op in each_block.ops:
                 for var_name in op.input_arg_names:
-                    if not var_name in params or var_name in self._param_device_map:
+                    if var_name not in params or var_name in self._param_device_map:
                         continue

                     device = op.attr(self._op_device_key)
@@ -404,7 +404,7 @@
             block._remove_op(op_idx)

         for var_name in list(block.vars.keys()):
-            if not var_name in used_var_names:
+            if var_name not in used_var_names:
                 block._remove_var(var_name)

         return used_var_names
diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py
index 3459f77854..a66712c14c 100755
--- a/python/paddle/distributed/ps/utils/public.py
+++ b/python/paddle/distributed/ps/utils/public.py
@@ -784,7 +784,7 @@ def union_forward_gradient_op(program_block_ops_list):
     block_op_list = {"forward": [], "backward": []}

     for op in program_block_ops_list[block_length // 2]:
-        if not "_grad" in op.type and not (op.type == "sum"):
+        if "_grad" not in op.type and not (op.type == "sum"):
             block_op_list["forward"].append(op)
         else:
             block_op_list["backward"].append(op)
@@ -901,7 +901,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
             if var not in previous_block_private and var not in previous_block_entrance:
                 previous_block_entrance.append(var)
                 previous_block_exit.append(var)
-            if not var in current_block_entrance:
+            if var not in current_block_entrance:
                 current_block_entrance.append(var)

     for index in range(0, len(block_var_detail) - 1, 1):
@@ -918,7 +918,7 @@
         need_add_vars = list(set(current_block_entrance) - set(exist_vars))
         need_ignore_vars = []
         for var in need_add_vars:
-            if not "@GRAD" in var:
+            if "@GRAD" not in var:
                 need_ignore_vars.append(var)
         need_add_vars = list(
             set(need_add_vars).difference(set(need_ignore_vars)))
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
index adfd0613bd..d3179373ff 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py
@@ -31,9 +31,9 @@ class TestClipOneDNNOp(OpTest):
         self.adjust_op_settings()

         self.min = self.attrs[
-            'min'] if not 'Min' in self.inputs else self.inputs['Min']
+            'min'] if 'Min' not in self.inputs else self.inputs['Min']
         self.max = self.attrs[
-            'max'] if not 'Max' in self.inputs else self.inputs['Max']
+            'max'] if 'Max' not in self.inputs else self.inputs['Max']

         self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)}
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index f458f28d26..2aa848d138 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -1173,7 +1173,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py
index 3b2bb42d57..deb7b606c8 100644
--- a/python/paddle/fluid/tests/unittests/test_exponential_op.py
+++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py
@@ -102,7 +102,7 @@ class TestExponentialAPI(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 7314841ca8..331aa0f20f 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -353,7 +353,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         def _check_random_value(dtype, expect, expect_mean, expect_std):
diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
index 0e73e3c5ef..71e6cca290 100644
--- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
@@ -250,7 +250,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py
index dfa15f98d9..a86f042b61 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -205,7 +205,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on GPU------>")
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index eb72beed2c..285e4f4f20 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -606,7 +606,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py
index 1903c30824..1ea3aa7cfa 100644
--- a/python/paddle/tensor/einsum.py
+++ b/python/paddle/tensor/einsum.py
@@ -411,7 +411,7 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
             plan.add_step(step)
             step = squeeze, [var2], var2, [-1, -2]
             plan.add_step(step)
-        elif j1 + j2 == 0 and not -1 in np.concatenate(
+        elif j1 + j2 == 0 and -1 not in np.concatenate(
                 (op1_vshape[K], op2_vshape[K])):
             assert all(op1_vshape[K] == op2_vshape[K])
             step = reshape, [
diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py
index f768bdf2ae..4cb6262cac 100644
--- a/tools/count_api_without_core_ops.py
+++ b/tools/count_api_without_core_ops.py
@@ -166,7 +166,7 @@ def visit_all_module(mod, func):
                     IdSet.add(instance_id)
                     visit_member(mod.__name__, instance, func)
             except:
-                if not cur_name in ErrorSet and not cur_name in skiplist:
+                if cur_name not in ErrorSet and cur_name not in skiplist:
                     ErrorSet.add(cur_name)
diff --git a/tools/infrt/get_compat_kernel_signature.py b/tools/infrt/get_compat_kernel_signature.py
index 9e112cafc8..1ac638aea8 100644
--- a/tools/infrt/get_compat_kernel_signature.py
+++ b/tools/infrt/get_compat_kernel_signature.py
@@ -38,7 +38,7 @@ def parse_compat_registry(kernel_info):
 def remove_grad_registry(kernels_registry):
     clean_kernel_registry = {}
     for registry in kernels_registry:
-        if (not "_grad" in registry):
+        if "_grad" not in registry:
             clean_kernel_registry[registry] = kernels_registry[registry]
     return clean_kernel_registry
@@ -47,7 +47,7 @@ def get_compat_kernels_info():
     kernels_info = {}
     compat_files = os.listdir("../../paddle/phi/ops/compat")
     for file_ in compat_files:
-        if not ".cc" in file_:
+        if ".cc" not in file_:
             compat_files.remove(file_)

     for file_ in compat_files:
diff --git a/tools/infrt/print_kernel_pass_info.py b/tools/infrt/print_kernel_pass_info.py
index ef9b0b59f3..d3673e9c90 100644
--- a/tools/infrt/print_kernel_pass_info.py
+++ b/tools/infrt/print_kernel_pass_info.py
@@ -22,7 +22,7 @@ skip_list = []
 def remove_grad_kernel(kernels):
     clean_kernels = []
     for kernel_ in kernels:
-        if (not "_grad" in kernel_):
+        if "_grad" not in kernel_:
             clean_kernels.append(kernel_)
     return clean_kernels
@@ -37,7 +37,7 @@ def get_compat_kernels_info(register):
     kernel_names = []
     for dirpath, dirnames, filenames in os.walk("../../paddle/fluid/operators"):
         for file_name in filenames:
-            if not ".cc" in file_name:
+            if ".cc" not in file_name:
                 continue
             with open(os.path.join(dirpath, file_name)) as f:
                 txt = f.readlines()
diff --git a/tools/print_signatures.py b/tools/print_signatures.py
index 5a6ad44e45..93f5815bc9 100644
--- a/tools/print_signatures.py
+++ b/tools/print_signatures.py
@@ -113,7 +113,7 @@ def visit_all_module(mod):
                     .format(member_name, instance.__name__),
                     file=sys.stderr)
         except:
-            if not cur_name in ErrorSet and not cur_name in skiplist:
+            if cur_name not in ErrorSet and cur_name not in skiplist:
                 ErrorSet.add(cur_name)
--
GitLab
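
Note on the rule this patch applies: pycodestyle's E713 flags 'not x in y' because Python parses it as 'not (x in y)', which is semantically identical to the clearer 'x not in y'. A minimal standalone sketch of the before/after forms follows; it is not part of the patch, and the variable names and values are only illustrative (borrowed from launch_utils.py for familiarity).

# E713 demo: both branches behave identically; the second form is preferred.
node_ips = ["192.168.0.1", "192.168.0.2"]  # illustrative values only
current_node_ip = "192.168.0.3"

# Old style, flagged as E713 ("test for membership should be 'not in x'");
# parsed as not (current_node_ip in node_ips).
if not current_node_ip in node_ips:  # noqa: E713
    print("skip: node not in cluster")

# New style produced by this patch: same behavior, clearer intent.
if current_node_ip not in node_ips:
    print("skip: node not in cluster")

Because the two forms are equivalent, the patch is purely stylistic and touches only the flagged lines.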