Unverified commit 7ad6d9ea, authored by Tony Cao, committed by GitHub

[CodeStyle][E713] Convert 'not ... in ' into 'not in' (#46734)

* Update README.md

* Update README.md

* Fix E713: convert 'not ... in' to 'not in'
Parent: fb35c6b7
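For readers unfamiliar with the rule: E713 is the pycodestyle check "test for membership should be 'not in x'". It flags membership tests spelled `not x in y`; because `not in` is a single comparison operator in Python, `x not in y` is exactly equivalent and reads more directly. A minimal sketch of the pattern this commit applies throughout (the names below are made up for illustration, not taken from the diff):

# Illustrative example only, not code from this PR.
ops = {"matmul", "relu"}

# Flagged by pycodestyle as E713:
if not "conv2d" in ops:
    print("conv2d is not registered")

# Equivalent, preferred spelling used throughout this commit:
if "conv2d" not in ops:
    print("conv2d is not registered")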
@@ -1481,7 +1481,7 @@ class ParameterServerLauncher(object):
                 format(self.node_ips, self.current_node_ip, self.node_rank))

     def start_ps(self):
-        if not self.current_node_ip in self.node_ips:
+        if self.current_node_ip not in self.node_ips:
             return
         cluster = Cluster(hdfs=None)
         server_rank = 0
......
@@ -255,7 +255,7 @@ class PipelineOptimizer(MetaOptimizerBase):
                 if param_name in processed_param_name: continue
                 processed_param_name.add(param_name)
                 grad_name = op_role_var[i + 1]
-                if not 'MERGED' in grad_name: grad_name += '@MERGED'
+                if 'MERGED' not in grad_name: grad_name += '@MERGED'
                 grad = block.vars[grad_name]
                 origin_param = origin_block.vars[op_role_var[i]]
                 if origin_param.is_distributed:
......
@@ -345,7 +345,7 @@ class HybridParallelInferenceHelper(object):
         for each_block in self._main_program.blocks:
             for op in each_block.ops:
                 for var_name in op.input_arg_names:
-                    if not var_name in params or var_name in self._param_device_map:
+                    if var_name not in params or var_name in self._param_device_map:
                         continue
                     device = op.attr(self._op_device_key)
@@ -404,7 +404,7 @@ class HybridParallelInferenceHelper(object):
                 block._remove_op(op_idx)

         for var_name in list(block.vars.keys()):
-            if not var_name in used_var_names:
+            if var_name not in used_var_names:
                 block._remove_var(var_name)
         return used_var_names
......
@@ -784,7 +784,7 @@ def union_forward_gradient_op(program_block_ops_list):
     block_op_list = {"forward": [], "backward": []}
     for op in program_block_ops_list[block_length // 2]:
-        if not "_grad" in op.type and not (op.type == "sum"):
+        if "_grad" not in op.type and not (op.type == "sum"):
             block_op_list["forward"].append(op)
         else:
             block_op_list["backward"].append(op)
@@ -901,7 +901,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
             if var not in previous_block_private and var not in previous_block_entrance:
                 previous_block_entrance.append(var)
                 previous_block_exit.append(var)
-                if not var in current_block_entrance:
+                if var not in current_block_entrance:
                     current_block_entrance.append(var)

     for index in range(0, len(block_var_detail) - 1, 1):
@@ -918,7 +918,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
         need_add_vars = list(set(current_block_entrance) - set(exist_vars))
         need_ignore_vars = []
         for var in need_add_vars:
-            if not "@GRAD" in var:
+            if "@GRAD" not in var:
                 need_ignore_vars.append(var)
         need_add_vars = list(
             set(need_add_vars).difference(set(need_ignore_vars)))
......
@@ -31,9 +31,9 @@ class TestClipOneDNNOp(OpTest):
         self.adjust_op_settings()

         self.min = self.attrs[
-            'min'] if not 'Min' in self.inputs else self.inputs['Min']
+            'min'] if 'Min' not in self.inputs else self.inputs['Min']
         self.max = self.attrs[
-            'max'] if not 'Max' in self.inputs else self.inputs['Max']
+            'max'] if 'Max' not in self.inputs else self.inputs['Max']

         self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)}
......
@@ -1173,7 +1173,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
......
@@ -102,7 +102,7 @@ class TestExponentialAPI(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
......
@@ -353,7 +353,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         def _check_random_value(dtype, expect, expect_mean, expect_std):
......
@@ -250,7 +250,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
......
@@ -205,7 +205,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generatte different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on GPU------>")
......
@@ -606,7 +606,7 @@ class TestRandomValue(unittest.TestCase):
             return

         # Different GPU generate different random value. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return

         print("Test Fixed Random number on V100 GPU------>")
......
@@ -411,7 +411,7 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
         plan.add_step(step)
         step = squeeze, [var2], var2, [-1, -2]
         plan.add_step(step)
-    elif j1 + j2 == 0 and not -1 in np.concatenate(
+    elif j1 + j2 == 0 and -1 not in np.concatenate(
             (op1_vshape[K], op2_vshape[K])):
         assert all(op1_vshape[K] == op2_vshape[K])
         step = reshape, [
......
@@ -166,7 +166,7 @@ def visit_all_module(mod, func):
                 IdSet.add(instance_id)
                 visit_member(mod.__name__, instance, func)
         except:
-            if not cur_name in ErrorSet and not cur_name in skiplist:
+            if cur_name not in ErrorSet and cur_name not in skiplist:
                 ErrorSet.add(cur_name)
......
@@ -38,7 +38,7 @@ def parse_compat_registry(kernel_info):

 def remove_grad_registry(kernels_registry):
     clean_kernel_registry = {}
     for registry in kernels_registry:
-        if (not "_grad" in registry):
+        if "_grad" not in registry:
             clean_kernel_registry[registry] = kernels_registry[registry]
     return clean_kernel_registry
@@ -47,7 +47,7 @@ def get_compat_kernels_info():
     kernels_info = {}
     compat_files = os.listdir("../../paddle/phi/ops/compat")
     for file_ in compat_files:
-        if not ".cc" in file_:
+        if ".cc" not in file_:
             compat_files.remove(file_)

     for file_ in compat_files:
......
@@ -22,7 +22,7 @@ skip_list = []

 def remove_grad_kernel(kernels):
     clean_kernels = []
     for kernel_ in kernels:
-        if (not "_grad" in kernel_):
+        if "_grad" not in kernel_:
             clean_kernels.append(kernel_)
     return clean_kernels
@@ -37,7 +37,7 @@ def get_compat_kernels_info(register):
     kernel_names = []
     for dirpath, dirnames, filenames in os.walk("../../paddle/fluid/operators"):
         for file_name in filenames:
-            if not ".cc" in file_name:
+            if ".cc" not in file_name:
                 continue
             with open(os.path.join(dirpath, file_name)) as f:
                 txt = f.readlines()
......
@@ -113,7 +113,7 @@ def visit_all_module(mod):
                     .format(member_name, instance.__name__),
                     file=sys.stderr)
         except:
-            if not cur_name in ErrorSet and not cur_name in skiplist:
+            if cur_name not in ErrorSet and cur_name not in skiplist:
                 ErrorSet.add(cur_name)
......