Unverified commit 5194f565, authored by Tony Cao, committed by GitHub

[CodeStyle][F632] Replace 'is' and 'is not' with == and != respectively (#46708)

* Update README.md

* Update README.md

* Fix F632: replace 'is', 'is not' with ==, != respectively
Parent 5f215e1c
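Background on the F632 fix: in Python, `is` tests object identity while `==` tests value equality. A comparison such as `device is 'cpu'` only ever worked because CPython happens to intern some short string literals; that behavior is implementation-dependent, and CPython 3.8+ emits `SyntaxWarning: "is" with a literal. Did you mean "=="?` for such code. A minimal, self-contained sketch of the difference (not part of this patch; the variable names are illustrative only):

```python
# 'is' compares object identity; '==' compares values.
a = 'cpu'
b = ''.join(['c', 'p', 'u'])  # equal value, but a new object built at runtime

print(a == b)  # True  -- value equality, which is what the code intends
print(a is b)  # False -- distinct objects, so the identity check fails
```

Every hunk below applies the same mechanical substitution: `is` becomes `==` and `is not` becomes `!=` wherever one operand is a string literal.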
......
@@ -76,7 +76,7 @@ def _is_cpuonly(backend):
     if (backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl'] and
         (core.is_compiled_with_cuda() or core.is_compiled_with_xpu()
          or core.is_compiled_with_npu()
-         or core.is_compiled_with_mlu())) or backend is 'xccl':
+         or core.is_compiled_with_mlu())) or backend == 'xccl':
         # passes 'auto' and can use cuda or xpu, use the default logics. so return False
         return False
......
......
@@ -293,7 +293,7 @@ class TestStaticModel(unittest.TestCase):
             # For PE
             if use_pe:
                 places = paddle.static.cpu_places(
-                ) if device is 'cpu' else paddle.static.cuda_places()
+                ) if device == 'cpu' else paddle.static.cuda_places()
                 main_program = paddle.static.CompiledProgram(
                     paddle.static.default_main_program(
                     )).with_data_parallel(loss_name=loss.name,
......
......
@@ -72,7 +72,7 @@ def custom_relu_static_pe(func, device, dtype, np_x, use_func=True):
     paddle.enable_static()
     paddle.set_device(device)
-    places = static.cpu_places() if device is 'cpu' else static.cuda_places()
+    places = static.cpu_places() if device == 'cpu' else static.cuda_places()
     with static.scope_guard(static.Scope()):
         with static.program_guard(static.Program()):
             x = static.data(name='X', shape=[None, 8], dtype=dtype)
......
......
@@ -278,7 +278,7 @@ class TransformerDecoder(nn.Layer):
         self.num_layers = num_layers
         self.layers = decoder_layers
         self.norm = norm
-        if norm is "LayerNorm":
+        if norm == "LayerNorm":
             self.norm = nn.LayerNorm(hidden_size)
         elif norm is not None:
             raise ValueError("Only support LayerNorm")
......
......
@@ -266,7 +266,7 @@ class TransformerDecoder(nn.Layer):
         self.num_layers = num_layers
         self.layers = decoder_layers
         self.norm = norm
-        if norm is "LayerNorm":
+        if norm == "LayerNorm":
             self.norm = nn.LayerNorm(hidden_size)
         elif norm is not None:
             raise ValueError("Only support LayerNorm")
......
......
@@ -313,7 +313,7 @@ class TransformerDecoder(nn.Layer):
         self.num_layers = num_layers
         self.layers = decoder_layers
         self.norm = norm
-        if norm is "LayerNorm":
+        if norm == "LayerNorm":
             self.norm = nn.LayerNorm(hidden_size)
         elif norm is not None:
             raise ValueError("Only support LayerNorm")
......
......
@@ -26,7 +26,7 @@ from paddle import enable_static
 def _lookup(weights, ids, flat_ids, op_version="lookup_table"):
     w_shape = weights.shape
-    out_shape = list(ids.shape[:-1]) if op_version is "lookup_table" else list(
+    out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list(
         ids.shape)
     out_shape.append(w_shape[-1])
     out = weights[flat_ids].reshape(out_shape)
@@ -36,7 +36,7 @@ def _lookup(weights, ids, flat_ids, op_version="lookup_table"):
 def _get_grad(weights, ids, flat_ids, op_version="lookup_table"):
     w_shape = weights.shape
     w_grad = np.zeros((w_shape), dtype=weights.dtype)
-    out_shape = list(ids.shape[:-1]) if op_version is "lookup_table" else list(
+    out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list(
         ids.shape)
     out_grad_shape = (np.prod(out_shape), w_shape[-1])
     out_grad = weights[flat_ids].reshape(out_grad_shape)
......
......
@@ -44,13 +44,13 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase):
     def cal_np_out_and_gradient(self):

         def _cal_np_out_and_gradient(func):
-            if func is 'amax':
+            if func == 'amax':
                 out = np.amax(self.x_np, axis=self.axis, keepdims=self.keepdim)
-            elif func is 'amin':
+            elif func == 'amin':
                 out = np.amin(self.x_np, axis=self.axis, keepdims=self.keepdim)
-            elif func is 'max':
+            elif func == 'max':
                 out = np.max(self.x_np, axis=self.axis, keepdims=self.keepdim)
-            elif func is 'min':
+            elif func == 'min':
                 out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim)
             else:
                 print('This unittest only test amax/amin/max/min, but now is',
@@ -74,13 +74,13 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase):
         _cal_np_out_and_gradient('min')

     def _choose_paddle_func(self, func, x):
-        if func is 'amax':
+        if func == 'amax':
             out = paddle.amax(x, self.axis, self.keepdim)
-        elif func is 'amin':
+        elif func == 'amin':
             out = paddle.amin(x, self.axis, self.keepdim)
-        elif func is 'max':
+        elif func == 'max':
             out = paddle.max(x, self.axis, self.keepdim)
-        elif func is 'min':
+        elif func == 'min':
             out = paddle.min(x, self.axis, self.keepdim)
         else:
             print('This unittest only test amax/amin/max/min, but now is', func)
......
......
@@ -100,7 +100,7 @@ class TestFetchAndFeed(unittest.TestCase):
         for k, v in all_vars.items():
             if ('tmp' not in k) and (
-                    k[0] is not '_' or v.persistable
+                    k[0] != '_' or v.persistable
             ) and v.type == core.VarDesc.VarType.LOD_TENSOR:
                 fetch_list.append(k)
......
......
@@ -131,7 +131,7 @@ class TestRealAPI(unittest.TestCase):
             res = paddle_apis[self.api](input_t).numpy()
             np.testing.assert_array_equal(np_res, res)
             res_t = input_t.real().numpy(
-            ) if self.api is "real" else input_t.imag().numpy()
+            ) if self.api == "real" else input_t.imag().numpy()
             np.testing.assert_array_equal(np_res, res_t)

     def test_name_argument(self):
......