diff --git a/.flake8 b/.flake8
index 6204cfa9f8b955da6ee18296e64112744e71cf7b..84ebba974763b1a6539c0a507cd40bcc62fcd742 100644
--- a/.flake8
+++ b/.flake8
@@ -23,7 +23,7 @@ ignore =
     # F, see https://flake8.pycqa.org/en/latest/user/error-codes.html
     F405,
-    F811,F821,F841,
+    F811,F841,
     # W, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
     W503
@@ -33,3 +33,7 @@ per-file-ignores =
     python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
     # Ignore unused imports in __init__.py
     __init__.py: F401
+    # Ignore undefined variables in CMake config and some dygraph_to_static tests
+    .cmake-format.py: F821
+    python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
+    python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
diff --git a/python/paddle/distributed/auto_parallel/completion.py b/python/paddle/distributed/auto_parallel/completion.py
index 02a8c17247534732a175a5ab5472799152cc67a3..c0f70f482dd17f598c483c44f19e965d914bf7e2 100644
--- a/python/paddle/distributed/auto_parallel/completion.py
+++ b/python/paddle/distributed/auto_parallel/completion.py
@@ -1510,8 +1510,12 @@ class Completer:
                 self._dist_context.set_op_dist_attr_for_program(
                     grad_op, grad_op_dist_attr
                 )
-                grad_op_dist_attr.impl_type = fwd_op_dist_attr.impl_type
-                grad_op_dist_attr.impl_idx = fwd_op_dist_attr.impl_idx
+                grad_op_dist_attr.impl_type = (
+                    fwd_op_dist_attr.impl_type  # noqa: F821
+                )
+                grad_op_dist_attr.impl_idx = (
+                    fwd_op_dist_attr.impl_idx  # noqa: F821
+                )
 
                 continue
diff --git a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py b/python/paddle/distributed/auto_parallel/cost/tensor_cost.py
index 0303e29749f9ab9786153cb84e0df5c92891e074..9d0794e23757daefa16ad94787ce632bc822538f 100644
--- a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py
+++ b/python/paddle/distributed/auto_parallel/cost/tensor_cost.py
@@ -100,9 +100,9 @@ class TensorCost:
 
         if dtype == paddle.float32 or dtype == paddle.int32:
             dtype_factor = 4
-        elif node.dtype == paddle.int64:
+        elif dtype == paddle.int64:
             dtype_factor = 8
-        elif node.dtype == paddle.uint8:
+        elif dtype == paddle.uint8:
             dtype_factor = 1
         else:
             dtype_factor = 2
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py
index b9ccb7b3c32a92a187da07aeab5e19e49c7f223f..01b326d3a562c75a7f122d677b406bf8aa687de5 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py
@@ -141,7 +141,7 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl):
     def backward(ctx, *args, **kwargs):
         raise RuntimeError(
             "primitive operator does NOT have backward function, op type: {}".format(
-                str(op.type)
+                str(op.type)  # noqa: F821
             )
         )
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py
index 8712950e01b05f89574efb0529f459aa8f6b1061..0210e260f8238a66a7011fb382d62cc4bb8091f7 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py
@@ -299,6 +299,8 @@ class TestPushPopTrans(unittest.TestCase):
         print(paddle.jit.to_static(vlist_of_dict)(x))
 
     def test4(self):
+        import numpy as np
+
         def vlist_of_dict(x):
             a = np.array([1, 2, 3])
             for i in range(3):
@@ -310,6 +312,8 @@ class TestPushPopTrans(unittest.TestCase):
         print(paddle.jit.to_static(vlist_of_dict)(x))
 
     def test5(self):
+        import numpy as np
+
         def vlist_of_dict(x):
             a = np.array([1, 2, 3])
             for i in range(3):
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
index 6ed5758893f11411706ee60efd0c2acb7a2a6183..28078aba7893c427a9c2df0cc7b4b654a7bd8cce 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
@@ -296,7 +296,9 @@ class YOLOv3(fluid.dygraph.Layer):
         blocks = self.block(inputs)
         for i, block in enumerate(blocks):
             if i > 0:
-                block = fluid.layers.concat(input=[route, block], axis=1)
+                block = fluid.layers.concat(
+                    input=[route, block], axis=1  # noqa: F821
+                )
             route, tip = self.yolo_blocks[i](block)
             block_out = self.block_outputs[i](tip)
             self.outputs.append(block_out)
diff --git a/python/paddle/fluid/tests/unittests/gradient_checker.py b/python/paddle/fluid/tests/unittests/gradient_checker.py
index 6e20037185285b8d1220c50747592ed18d864885..9b08f17dadd7bb00c638aebd2c64453a9ebc44d3 100644
--- a/python/paddle/fluid/tests/unittests/gradient_checker.py
+++ b/python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -59,7 +59,7 @@ def _get_item(t, i, np_dtype):
         raise ValueError("Not supported data type " + str(np_dtype))
 
 
-def _set_item(t, i, e, np_dtype):
+def _set_item(t, i, e, np_dtype, place):
     if np_dtype == np.float16:
         np_t = np.array(t).astype(np.float16)
         shape = np_t.shape
@@ -145,14 +145,14 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta):
     for i in range(x_size):
         orig = _get_item(x_t, i, np_type)
         x_pos = orig + delta
-        _set_item(x_t, i, x_pos, np_type)
+        _set_item(x_t, i, x_pos, np_type, place)
         y_pos = run()
 
         x_neg = orig - delta
-        _set_item(x_t, i, x_neg, np_type)
+        _set_item(x_t, i, x_neg, np_type, place)
         y_neg = run()
 
-        _set_item(x_t, i, orig, np_type)
+        _set_item(x_t, i, orig, np_type, place)
         for j in range(len(y)):
             jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.0
@@ -207,7 +207,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
     filted_idx, filted_dx = zip(*filted)
 
     for i in range(y_size):
-        _set_item(dy_t, i, 1, np_type)
+        _set_item(dy_t, i, 1, np_type, place)
 
         dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)
@@ -220,7 +220,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
                     dx[dx_idx].shape, dtype=np_type
                 ).flatten()
 
-        _set_item(dy_t, i, 0, np_type)
+        _set_item(dy_t, i, 0, np_type, place)
 
     return jacobian
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
index a150683a415f335d51525b3c3ce58ecd90cc266d..5c55ced4f292c7ef2855ddf412cdbfd3cf602040 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py
@@ -60,8 +60,12 @@ class MLPLayer(nn.Layer):
 
     def forward(self, input):
         if _global_parallel_strategy == "pp":
-            auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None])
-            auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None])
+            auto.shard_tensor(
+                self.linear0.weight, PP_MESH_0, [None, None]  # noqa: F821
+            )
+            auto.shard_tensor(
+                self.linear1.weight, PP_MESH_1, [None, None]  # noqa: F821
+            )
         else:
             auto.shard_tensor(
                 self.linear0.weight, _global_process_mesh, [None, None]
@@ -93,8 +97,8 @@ def mlp_forward(train_program, start_program):
     )
 
     if _global_parallel_strategy == "pp":
-        auto.shard_tensor(input, PP_MESH_0, [None, None])
-        auto.shard_tensor(label, PP_MESH_1, [None, None])
+        auto.shard_tensor(input, PP_MESH_0, [None, None])  # noqa: F821
+        auto.shard_tensor(label, PP_MESH_1, [None, None])  # noqa: F821
     elif _global_parallel_strategy == "dp":
         auto.shard_tensor(input, _global_process_mesh, ["x", None])
     else:
diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py
index dbf14c81948f9ba0b638187f7f16c0cd818c2452..14d9676b339d4c5ac407ad662842706f490f2d6e 100644
--- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py
@@ -86,7 +86,9 @@ class TestReorderLoDTensor(unittest.TestCase):
             lod_level_i = np.random.randint(
                 low=1,
                 high=5,
-                size=self.num_seq if i == 0 else sum(lod_level_i),
+                size=self.num_seq
+                if i == 0
+                else sum(lod_level_i),  # noqa: F821
             ).tolist()
             data_lod.append(lod_level_i)
             data_value = np.random.random(
diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py
index 9d77dadf8dc09ea863edf64235f6e1b857c81e8f..0481096d5760b695d952a5c825a78a8130af3e19 100644
--- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py
+++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py
@@ -83,7 +83,9 @@ class TestWeightNormalization(unittest.TestCase):
             lod_level_i = np.random.randint(
                 low=1,
                 high=5,
-                size=self.batch_size if i == 0 else sum(lod_level_i),
+                size=self.batch_size
+                if i == 0
+                else sum(lod_level_i),  # noqa: F821
             ).tolist()
             data_lod.append(lod_level_i)
             data_value = np.random.random(