Unverified commit 7ad6d9ea, authored by Tony Cao, committed by GitHub

[CodeStyle][E713] Convert 'not ... in' into 'not in' (#46734)

* Update README.md

* Update README.md

* Fix E713: convert 'not ... in' to 'not in'
Parent fb35c6b7
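Pycodestyle's E713 ("test for membership should be 'not in x'") flags comparisons written as 'not x in y'. Because 'in' binds more tightly than 'not', 'not x in y' already parses as 'not (x in y)', so the rewrite to 'x not in y' is purely cosmetic: behavior is identical, but the expression reads as the single membership operator it is. A minimal sketch of the rewrite this commit applies throughout the hunks below ('ip' and 'node_ips' are illustrative names, not taken from the diff):

# Before: parses as 'not (ip in node_ips)' -- works, but flagged by E713.
ip, node_ips = "192.168.0.3", ["192.168.0.1", "192.168.0.2"]
if not ip in node_ips:
    print("unknown node")

# After: identical behavior, reads as one 'not in' operator.
if ip not in node_ips:
    print("unknown node")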
@@ -1481,7 +1481,7 @@ class ParameterServerLauncher(object):
                 format(self.node_ips, self.current_node_ip, self.node_rank))
 
     def start_ps(self):
-        if not self.current_node_ip in self.node_ips:
+        if self.current_node_ip not in self.node_ips:
             return
         cluster = Cluster(hdfs=None)
         server_rank = 0
@@ -255,7 +255,7 @@ class PipelineOptimizer(MetaOptimizerBase):
                 if param_name in processed_param_name: continue
                 processed_param_name.add(param_name)
                 grad_name = op_role_var[i + 1]
-                if not 'MERGED' in grad_name: grad_name += '@MERGED'
+                if 'MERGED' not in grad_name: grad_name += '@MERGED'
                 grad = block.vars[grad_name]
                 origin_param = origin_block.vars[op_role_var[i]]
                 if origin_param.is_distributed:
@@ -345,7 +345,7 @@ class HybridParallelInferenceHelper(object):
         for each_block in self._main_program.blocks:
             for op in each_block.ops:
                 for var_name in op.input_arg_names:
-                    if not var_name in params or var_name in self._param_device_map:
+                    if var_name not in params or var_name in self._param_device_map:
                         continue
                     device = op.attr(self._op_device_key)
@@ -404,7 +404,7 @@ class HybridParallelInferenceHelper(object):
             block._remove_op(op_idx)
 
         for var_name in list(block.vars.keys()):
-            if not var_name in used_var_names:
+            if var_name not in used_var_names:
                 block._remove_var(var_name)
 
         return used_var_names
@@ -784,7 +784,7 @@ def union_forward_gradient_op(program_block_ops_list):
     block_op_list = {"forward": [], "backward": []}
 
     for op in program_block_ops_list[block_length // 2]:
-        if not "_grad" in op.type and not (op.type == "sum"):
+        if "_grad" not in op.type and not (op.type == "sum"):
             block_op_list["forward"].append(op)
         else:
             block_op_list["backward"].append(op)
@@ -901,7 +901,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
             if var not in previous_block_private and var not in previous_block_entrance:
                 previous_block_entrance.append(var)
                 previous_block_exit.append(var)
-                if not var in current_block_entrance:
+                if var not in current_block_entrance:
                     current_block_entrance.append(var)
 
     for index in range(0, len(block_var_detail) - 1, 1):
@@ -918,7 +918,7 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail,
         need_add_vars = list(set(current_block_entrance) - set(exist_vars))
         need_ignore_vars = []
         for var in need_add_vars:
-            if not "@GRAD" in var:
+            if "@GRAD" not in var:
                 need_ignore_vars.append(var)
         need_add_vars = list(
             set(need_add_vars).difference(set(need_ignore_vars)))
@@ -31,9 +31,9 @@ class TestClipOneDNNOp(OpTest):
         self.adjust_op_settings()
 
         self.min = self.attrs[
-            'min'] if not 'Min' in self.inputs else self.inputs['Min']
+            'min'] if 'Min' not in self.inputs else self.inputs['Min']
         self.max = self.attrs[
-            'max'] if not 'Max' in self.inputs else self.inputs['Max']
+            'max'] if 'Max' not in self.inputs else self.inputs['Max']
 
         self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)}
@@ -1173,7 +1173,7 @@ class TestRandomValue(unittest.TestCase):
             return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         print("Test Fixed Random number on V100 GPU------>")
@@ -102,7 +102,7 @@ class TestExponentialAPI(unittest.TestCase):
             return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         print("Test Fixed Random number on V100 GPU------>")
@@ -353,7 +353,7 @@ class TestRandomValue(unittest.TestCase):
            return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         def _check_random_value(dtype, expect, expect_mean, expect_std):
@@ -250,7 +250,7 @@ class TestRandomValue(unittest.TestCase):
             return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         print("Test Fixed Random number on V100 GPU------>")
@@ -205,7 +205,7 @@ class TestRandomValue(unittest.TestCase):
             return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         print("Test Fixed Random number on GPU------>")
@@ -606,7 +606,7 @@ class TestRandomValue(unittest.TestCase):
             return
 
         # Different GPUs generate different random values. Only test V100 here.
-        if not "V100" in paddle.device.cuda.get_device_name():
+        if "V100" not in paddle.device.cuda.get_device_name():
             return
 
         print("Test Fixed Random number on V100 GPU------>")
@@ -411,7 +411,7 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
            plan.add_step(step)
            step = squeeze, [var2], var2, [-1, -2]
            plan.add_step(step)
-    elif j1 + j2 == 0 and not -1 in np.concatenate(
+    elif j1 + j2 == 0 and -1 not in np.concatenate(
            (op1_vshape[K], op2_vshape[K])):
        assert all(op1_vshape[K] == op2_vshape[K])
        step = reshape, [
@@ -166,7 +166,7 @@ def visit_all_module(mod, func):
             IdSet.add(instance_id)
             visit_member(mod.__name__, instance, func)
         except:
-            if not cur_name in ErrorSet and not cur_name in skiplist:
+            if cur_name not in ErrorSet and cur_name not in skiplist:
                 ErrorSet.add(cur_name)
@@ -38,7 +38,7 @@ def parse_compat_registry(kernel_info):
 
 def remove_grad_registry(kernels_registry):
     clean_kernel_registry = {}
     for registry in kernels_registry:
-        if (not "_grad" in registry):
+        if "_grad" not in registry:
             clean_kernel_registry[registry] = kernels_registry[registry]
     return clean_kernel_registry
@@ -47,7 +47,7 @@ def get_compat_kernels_info():
     kernels_info = {}
     compat_files = os.listdir("../../paddle/phi/ops/compat")
     for file_ in compat_files:
-        if not ".cc" in file_:
+        if ".cc" not in file_:
             compat_files.remove(file_)
 
     for file_ in compat_files:
@@ -22,7 +22,7 @@ skip_list = []
 
 def remove_grad_kernel(kernels):
     clean_kernels = []
     for kernel_ in kernels:
-        if (not "_grad" in kernel_):
+        if "_grad" not in kernel_:
             clean_kernels.append(kernel_)
     return clean_kernels
@@ -37,7 +37,7 @@ def get_compat_kernels_info(register):
     kernel_names = []
     for dirpath, dirnames, filenames in os.walk("../../paddle/fluid/operators"):
         for file_name in filenames:
-            if not ".cc" in file_name:
+            if ".cc" not in file_name:
                 continue
             with open(os.path.join(dirpath, file_name)) as f:
                 txt = f.readlines()
@@ -113,7 +113,7 @@ def visit_all_module(mod):
                       .format(member_name, instance.__name__),
                       file=sys.stderr)
         except:
-            if not cur_name in ErrorSet and not cur_name in skiplist:
+            if cur_name not in ErrorSet and cur_name not in skiplist:
                 ErrorSet.add(cur_name)
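With the rewrite in place, the absence of remaining E713 violations can be checked programmatically. A minimal sketch assuming pycodestyle (the checker behind flake8's E-codes) is installed; the file path is illustrative, not an exhaustive list of files touched by this commit:

import pycodestyle

# Restrict the run to E713 so unrelated style issues do not add noise.
style = pycodestyle.StyleGuide(select=["E713"])
report = style.check_files(["python/paddle/distributed/fleet/launch_utils.py"])
print(report.total_errors)  # expected to be 0 after this commit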