From 05a47f399f0150a262a842a4bb2aea0c677cd5e6 Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Mon, 14 Nov 2022 14:49:45 +0800
Subject: [PATCH] [CodeStyle][F821] fix undefined variables due to missing
 imports, misspelled variable names (#47899)

* `hann` -> `_hann`
* `false` -> `False`
* a missing passed argument `reduce_all`
* some missing imports
* `device_type` -> `heter_device_type`
* `PKVClient` -> `KVClient`
* fix some typos and missing imports
---
 python/paddle/audio/functional/window.py | 2 +-
 python/paddle/distributed/auto_parallel/cost/base_cost.py | 2 +-
 .../distributed/auto_parallel/operators/dist_reshape.py | 1 +
 python/paddle/distributed/auto_parallel/planner.py | 2 +-
 .../paddle/distributed/fleet/base/orthogonal_strategy.py | 4 +++-
 .../distributed/fleet/data_generator/data_generator.py | 8 ++------
 python/paddle/distributed/fleet/layers/mpu/mp_ops.py | 1 +
 python/paddle/distributed/fleet/runtime/the_one_ps.py | 4 +++-
 python/paddle/distributed/fleet/utils/fs.py | 1 +
 python/paddle/distributed/launch/job/pod.py | 6 ++++--
 python/paddle/distributed/launch/utils/kv_client.py | 2 +-
 .../passes/auto_parallel_data_parallel_optimization.py | 4 +++-
 python/paddle/distributed/ps/utils/public.py | 2 +-
 python/paddle/distribution/variable.py | 2 ++
 .../unittests/ir/inference/test_trt_convert_equal.py | 2 +-
 python/paddle/fluid/tests/unittests/test_dropout_nd_op.py | 1 +
 python/paddle/fluid/tests/unittests/test_mean_op.py | 8 +++++---
 17 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py
index 4836afbb61..52ca3e4773 100644
--- a/python/paddle/audio/functional/window.py
+++ b/python/paddle/audio/functional/window.py
@@ -210,7 +210,7 @@ def _tukey(
     if alpha <= 0:
         return paddle.ones((M,), dtype=dtype)
     elif alpha >= 1.0:
-        return hann(M, sym=sym)
+        return _hann(M, sym=sym)
 
     M, needs_trunc = _extend(M, sym)
 
diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/cost/base_cost.py
index 249f64b7a0..f9dc6b6fc2 100644
--- a/python/paddle/distributed/auto_parallel/cost/base_cost.py
+++ b/python/paddle/distributed/auto_parallel/cost/base_cost.py
@@ -833,7 +833,7 @@ class CommOpCost(OpCost):
             if self.op_desc is not None:
                 self._group_ranks = self.op_desc["group_ranks"]
             elif self.op is not None:
-                ring_id = op.attrs("ring_id")
+                ring_id = self.op.attrs("ring_id")
                 process_group = get_process_group(ring_id)
                 if process_group is None:
                     raise ValueError(
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
index dcb85bf596..b305d88d7d 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
@@ -23,6 +23,7 @@ from .dist_default import DistributedDefaultImpl0
 from ..cost import build_comp_desc_from_dist_op, build_comp_costs_from_descs
 from ..cost import Reshape2OpCost
 from ..cost import Reshape2GradOpCost
+from ..cost import build_dp_costs
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 
 
diff --git a/python/paddle/distributed/auto_parallel/planner.py b/python/paddle/distributed/auto_parallel/planner.py
index 8148392f0a..15bf8058f7 100755
--- a/python/paddle/distributed/auto_parallel/planner.py
+++ b/python/paddle/distributed/auto_parallel/planner.py
@@ -437,7 +437,7 @@ class SearchAlgorithm:
 
     @property
     def name(self):
-        self.name = name
+        self.name = self._name
 
     def search(self):
         raise NotImplementedError("Please Implement this method in subclass.")
diff --git a/python/paddle/distributed/fleet/base/orthogonal_strategy.py b/python/paddle/distributed/fleet/base/orthogonal_strategy.py
index d0fec2cfdb..36af85d415 100644
--- a/python/paddle/distributed/fleet/base/orthogonal_strategy.py
+++ b/python/paddle/distributed/fleet/base/orthogonal_strategy.py
@@ -129,7 +129,9 @@ class OrthogonalStrategy:
     def _check_valid_strategy(self):
         assert len(self._list_of_strategy_name) == len(
             set(self._list_of_strategy_name)
-        ), "Defined duplicated strategies: {}".format(list_of_strategy)
+        ), "Defined duplicated strategies: {}".format(
+            self._list_of_strategy_name
+        )
         num_of_ranks = functools.reduce(
             lambda x, y: x * y, self._list_of_degree
         )
diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py
index abf8f5f49f..297a2cf003 100644
--- a/python/paddle/distributed/fleet/data_generator/data_generator.py
+++ b/python/paddle/distributed/fleet/data_generator/data_generator.py
@@ -342,9 +342,7 @@ class MultiSlotDataGenerator(DataGenerator):
                 for elem in elements:
                     if isinstance(elem, float):
                         self._proto_info[-1] = (name, "float")
-                    elif not isinstance(elem, int) and not isinstance(
-                        elem, long
-                    ):
+                    elif not isinstance(elem, int):
                         raise ValueError(
                             "the type of element%s must be in int or float"
                             % type(elem)
@@ -379,9 +377,7 @@
                     if self._proto_info[index][1] != "float":
                         if isinstance(elem, float):
                             self._proto_info[index] = (name, "float")
-                        elif not isinstance(elem, int) and not isinstance(
-                            elem, long
-                        ):
+                        elif not isinstance(elem, int):
                             raise ValueError(
                                 "the type of element%s must be in int or float"
                                 % type(elem)
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
index 07fc4e7172..83ba760c9e 100644
--- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
+++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
@@ -18,6 +18,7 @@ from paddle.fluid import core
 from paddle.fluid.framework import _non_static_mode
 from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.framework import in_dygraph_mode
+from paddle.fluid.framework import _varbase_creator
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid.dygraph import layers
diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py
index 5176584550..98e0629978 100644
--- a/python/paddle/distributed/fleet/runtime/the_one_ps.py
+++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py
@@ -902,7 +902,9 @@ class TheOnePSRuntime(RuntimeBase):
             heter_device_type = self.role_maker._heter_device_type().upper()
             if heter_device_type not in ["GPU", "XPU", "CPU"]:
                 raise ValueError(
-                    "Heter Worker Not Support Device {}".format(device_type)
+                    "Heter Worker Not Support Device {}".format(
+                        heter_device_type
+                    )
                 )
             if heter_device_type == "GPU":
                 executor = Executor(
diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py
index 667752e668..fd2338e36c 100644
--- a/python/paddle/distributed/fleet/utils/fs.py
+++ b/python/paddle/distributed/fleet/utils/fs.py
@@ -19,6 +19,7 @@ import re
 import time
 import abc
 from paddle.fluid import core
+from .log_util import logger
 import functools
 import shutil
 
diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py
index a322bcdccf..ef72263dd8 100644
--- a/python/paddle/distributed/launch/job/pod.py
+++ b/python/paddle/distributed/launch/job/pod.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 from .container import Container
 from .status import Status
 
@@ -27,8 +29,8 @@ class PodSepc:
         )
 
         # by controller
-        self._init_containers: List[Container] = []
-        self._containers: List[Container] = []
+        self._init_containers: list[Container] = []
+        self._containers: list[Container] = []
 
         # self.resource: Resource = None
         # self.status: Status = None
diff --git a/python/paddle/distributed/launch/utils/kv_client.py b/python/paddle/distributed/launch/utils/kv_client.py
index b60970382a..e048220335 100644
--- a/python/paddle/distributed/launch/utils/kv_client.py
+++ b/python/paddle/distributed/launch/utils/kv_client.py
@@ -79,7 +79,7 @@ class KVClient:
 
 
 if __name__ == '__main__':
-    cli = PKVClient("http://localhost:8090")
+    cli = KVClient("http://localhost:8090")
     data = {"/workers/1": "rank1", "/workers/2": "rank2"}
     for k, v in data.items():
         cli.put(k, v)
diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
index cbc9170a1e..601cd31948 100644
--- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
+++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
@@ -494,7 +494,9 @@ class DataParallelOptimizationPass(PassBase):
         for idx in sorted(remove_op_indices, reverse=True):
             assert (
                 block.ops[idx].type in remove_op_types
-            ), "Unexception: try to remove op {}".format(str(op))
+            ), "Unexception: try to remove op {}".format(
+                str(block.ops[idx])
+            )
             block._remove_op(idx)
 
         # insert coalecse op
diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py
index 522bc5f36c..b15edf62d1 100755
--- a/python/paddle/distributed/ps/utils/public.py
+++ b/python/paddle/distributed/ps/utils/public.py
@@ -482,7 +482,7 @@ def get_geo_trainer_send_context(attrs):
     if attrs['ps_mode'] != DistributedMode.GEO:
         raise ValueError(
             "ps mode: {} not matched {}",
-            format(ps_mode, "get_geo_trainer_send_context"),
+            format(attrs['ps_mode'], "get_geo_trainer_send_context"),
         )
     send_ctx = {}
     trainer_id = get_role_id(attrs['role_maker'])
diff --git a/python/paddle/distribution/variable.py b/python/paddle/distribution/variable.py
index 99cafc5ea7..58d4a75215 100644
--- a/python/paddle/distribution/variable.py
+++ b/python/paddle/distribution/variable.py
@@ -14,6 +14,8 @@
 
 from paddle.distribution import constraint
 
+import paddle
+
 
 class Variable:
     """Random variable of probability distribution.
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
index ee73586e6e..ab873b4b3f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
@@ -27,7 +27,7 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
             program_config.ops[i].attrs for i in range(len(program_config.ops))
         ]
         if attrs[0]['axis'] == 0:
-            return false
+            return False
         ver = paddle_infer.get_trt_compile_version()
         if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8415:
             return False
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
index a21ec9625f..e19a0104de 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
@@ -22,6 +22,7 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import _non_static_mode
 from paddle import _legacy_C_ops
 from paddle.static import default_main_program
+from paddle.fluid.data_feeder import check_variable_and_dtype
 
 
 def dropout_nd(
diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index 0c52d7596c..fb52745c75 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -140,7 +140,7 @@ def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
     return np.mean(x, axis=axis, keepdims=keepdim)
 
 
-def ref_reduce_mean_grad(x, axis, dtype):
+def ref_reduce_mean_grad(x, axis, dtype, reduce_all):
     if reduce_all:
         axis = list(range(x.ndim))
 
@@ -191,7 +191,6 @@ class TestReduceMeanOp(OpTest):
         if self.dtype != 'float16':
            self.check_grad(['X'], ['Out'], check_eager=True)
         else:
-            return
            if not core.is_compiled_with_cuda():
                return
            place = paddle.CUDAPlace(0)
@@ -204,7 +203,10 @@ class TestReduceMeanOp(OpTest):
             )
             dx = paddle.grad(y, x)[0].numpy()
             dx_expected = ref_reduce_mean_grad(
-                self.inputs['X'], self.attrs['dim'], self.dtype
+                self.inputs['X'],
+                self.attrs['dim'],
+                self.dtype,
+                self.attrs['reduce_all'],
             )
             np.testing.assert_array_equal(dx, dx_expected)
 
--
GitLab