Unverified commit 4e09b089, authored by Nyakku Shigure and committed by GitHub

[CodeStyle][F821] fix remaining F821 issues (#47968)

* [CodeStyle][F821] fix remaining F821 issues

* refine comment

* fix _set_item
Parent: aa08b769
@@ -23,7 +23,7 @@ ignore =
     # F, see https://flake8.pycqa.org/en/latest/user/error-codes.html
     F405,
-    F811,F821,F841,
+    F811,F841,
     # W, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
     W503
@@ -33,3 +33,7 @@ per-file-ignores =
     python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
     # Ignore unused imports in __init__.py
     __init__.py: F401
+    # Ignore undefined variables in CMake config and some dygraph_to_static tests
+    .cmake-format.py: F821
+    python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
+    python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
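For context, flake8's F821 check reports any reference to a name that is never bound in the file being checked. A minimal, hypothetical example (not from this repository):

    import numpy as np

    def to_float16(t):
        # F821: `np_type` is referenced but never defined or imported here
        return np.array(t).astype(np_type)

Some of the reports fixed in this commit are genuine bugs of this kind; the rest are intentional references that are silenced either for a whole file (the per-file-ignores entries added above) or for a single line with an inline # noqa: F821 comment.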
@@ -1510,8 +1510,12 @@ class Completer:
     self._dist_context.set_op_dist_attr_for_program(
         grad_op, grad_op_dist_attr
     )
-    grad_op_dist_attr.impl_type = fwd_op_dist_attr.impl_type
-    grad_op_dist_attr.impl_idx = fwd_op_dist_attr.impl_idx
+    grad_op_dist_attr.impl_type = (
+        fwd_op_dist_attr.impl_type  # noqa: F821
+    )
+    grad_op_dist_attr.impl_idx = (
+        fwd_op_dist_attr.impl_idx  # noqa: F821
+    )
     continue
@@ -100,9 +100,9 @@ class TensorCost:
         if dtype == paddle.float32 or dtype == paddle.int32:
             dtype_factor = 4
-        elif node.dtype == paddle.int64:
+        elif dtype == paddle.int64:
             dtype_factor = 8
-        elif node.dtype == paddle.uint8:
+        elif dtype == paddle.uint8:
             dtype_factor = 1
         else:
             dtype_factor = 2
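The removed lines above read `node.dtype`, but no `node` variable exists in that method; the fix reuses the local `dtype` in every branch. As a rough standalone sketch of the same byte-size logic (hypothetical helper, not part of the diff):

    import paddle

    def dtype_nbytes(dtype):
        # bytes per element, mirroring the dtype_factor branches above
        if dtype == paddle.float32 or dtype == paddle.int32:
            return 4
        elif dtype == paddle.int64:
            return 8
        elif dtype == paddle.uint8:
            return 1
        else:
            return 2  # float16 / bfloat16 and other 2-byte types

    # e.g. a [1024, 1024] float32 tensor takes 1024 * 1024 * dtype_nbytes(paddle.float32) bytes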
@@ -141,7 +141,7 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl):
     def backward(ctx, *args, **kwargs):
         raise RuntimeError(
             "primitive operator does NOT have backward function, op type: {}".format(
-                str(op.type)
+                str(op.type)  # noqa: F821
             )
         )
@@ -299,6 +299,8 @@ class TestPushPopTrans(unittest.TestCase):
         print(paddle.jit.to_static(vlist_of_dict)(x))

     def test4(self):
+        import numpy as np
+
         def vlist_of_dict(x):
             a = np.array([1, 2, 3])
             for i in range(3):
@@ -310,6 +312,8 @@ class TestPushPopTrans(unittest.TestCase):
         print(paddle.jit.to_static(vlist_of_dict)(x))

     def test5(self):
+        import numpy as np
+
         def vlist_of_dict(x):
             a = np.array([1, 2, 3])
             for i in range(3):
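The two test hunks above fix `np` being undefined inside the nested `vlist_of_dict` functions by importing numpy in the enclosing test method. That works because a nested function can resolve names from its enclosing scope; a small illustration of the rule (hypothetical, not taken from the test file):

    def outer():
        import numpy as np  # bound in outer's local scope

        def inner(x):
            return np.array(x) * 2  # `np` is resolved from the enclosing scope

        return inner([1, 2, 3])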
@@ -296,7 +296,9 @@ class YOLOv3(fluid.dygraph.Layer):
         blocks = self.block(inputs)
         for i, block in enumerate(blocks):
             if i > 0:
-                block = fluid.layers.concat(input=[route, block], axis=1)
+                block = fluid.layers.concat(
+                    input=[route, block], axis=1  # noqa: F821
+                )
             route, tip = self.yolo_blocks[i](block)
             block_out = self.block_outputs[i](tip)
             self.outputs.append(block_out)
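In the YOLOv3 hunk above, `route` is only read when `i > 0` and is assigned at the end of every loop iteration, so the reference is safe at runtime; flake8 cannot follow that control flow, hence the `# noqa: F821`. The same loop-carried pattern appears in the two `lod_level_i` hunks further down. A stripped-down illustration (hypothetical, not the YOLOv3 code):

    def chain(values):
        out = []
        for i, v in enumerate(values):
            if i > 0:
                v = v + prev  # noqa: F821  (`prev` exists from the previous iteration)
            prev = v
            out.append(v)
        return out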
@@ -59,7 +59,7 @@ def _get_item(t, i, np_dtype):
         raise ValueError("Not supported data type " + str(np_dtype))


-def _set_item(t, i, e, np_dtype):
+def _set_item(t, i, e, np_dtype, place):
     if np_dtype == np.float16:
         np_t = np.array(t).astype(np.float16)
         shape = np_t.shape
@@ -145,14 +145,14 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta):
     for i in range(x_size):
         orig = _get_item(x_t, i, np_type)
         x_pos = orig + delta
-        _set_item(x_t, i, x_pos, np_type)
+        _set_item(x_t, i, x_pos, np_type, place)
         y_pos = run()

         x_neg = orig - delta
-        _set_item(x_t, i, x_neg, np_type)
+        _set_item(x_t, i, x_neg, np_type, place)
         y_neg = run()

-        _set_item(x_t, i, orig, np_type)
+        _set_item(x_t, i, orig, np_type, place)

         for j in range(len(y)):
             jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.0
@@ -207,7 +207,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
     filted_idx, filted_dx = zip(*filted)

     for i in range(y_size):
-        _set_item(dy_t, i, 1, np_type)
+        _set_item(dy_t, i, 1, np_type, place)
         dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)
@@ -220,7 +220,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
                 dx[dx_idx].shape, dtype=np_type
             ).flatten()
-        _set_item(dy_t, i, 0, np_type)
+        _set_item(dy_t, i, 0, np_type, place)
     return jacobian
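The gradient-checker hunks above thread `place` through every `_set_item` call; previously `place` was evidently used inside `_set_item` without being a parameter, which is what F821 flagged (note the commit message's "fix _set_item"). A simplified sketch of the updated helper, assuming the tensor is written back with `Tensor.set(np_t, place)` as in other Paddle test utilities:

    import numpy as np

    def _set_item(t, i, e, np_dtype, place):
        # copy the tensor into numpy, overwrite one flattened element,
        # then write the buffer back onto its original place (CPU/GPU)
        np_t = np.array(t).astype(np_dtype)
        shape = np_t.shape
        np_t = np_t.flatten()
        np_t[i] = e
        np_t = np_t.reshape(shape)
        t.set(np_t, place)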
@@ -60,8 +60,12 @@ class MLPLayer(nn.Layer):
     def forward(self, input):
         if _global_parallel_strategy == "pp":
-            auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None])
-            auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None])
+            auto.shard_tensor(
+                self.linear0.weight, PP_MESH_0, [None, None]  # noqa: F821
+            )
+            auto.shard_tensor(
+                self.linear1.weight, PP_MESH_1, [None, None]  # noqa: F821
+            )
         else:
             auto.shard_tensor(
                 self.linear0.weight, _global_process_mesh, [None, None]
@@ -93,8 +97,8 @@ def mlp_forward(train_program, start_program):
     )

     if _global_parallel_strategy == "pp":
-        auto.shard_tensor(input, PP_MESH_0, [None, None])
-        auto.shard_tensor(label, PP_MESH_1, [None, None])
+        auto.shard_tensor(input, PP_MESH_0, [None, None])  # noqa: F821
+        auto.shard_tensor(label, PP_MESH_1, [None, None])  # noqa: F821
     elif _global_parallel_strategy == "dp":
         auto.shard_tensor(input, _global_process_mesh, ["x", None])
     else:
@@ -86,7 +86,9 @@ class TestReorderLoDTensor(unittest.TestCase):
             lod_level_i = np.random.randint(
                 low=1,
                 high=5,
-                size=self.num_seq if i == 0 else sum(lod_level_i),
+                size=self.num_seq
+                if i == 0
+                else sum(lod_level_i),  # noqa: F821
             ).tolist()
             data_lod.append(lod_level_i)
             data_value = np.random.random(
@@ -83,7 +83,9 @@ class TestWeightNormalization(unittest.TestCase):
             lod_level_i = np.random.randint(
                 low=1,
                 high=5,
-                size=self.batch_size if i == 0 else sum(lod_level_i),
+                size=self.batch_size
+                if i == 0
+                else sum(lod_level_i),  # noqa: F821
             ).tolist()
             data_lod.append(lod_level_i)
             data_value = np.random.random(