diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py
index 4836afbb61d03f81422b8102efbd63a00b7b794e..52ca3e477324c33c9f98073f4d1ae503c8176f25 100644
--- a/python/paddle/audio/functional/window.py
+++ b/python/paddle/audio/functional/window.py
@@ -210,7 +210,7 @@ def _tukey(
     if alpha <= 0:
         return paddle.ones((M,), dtype=dtype)
     elif alpha >= 1.0:
-        return hann(M, sym=sym)
+        return _hann(M, sym=sym)
 
     M, needs_trunc = _extend(M, sym)
 
diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/cost/base_cost.py
index 249f64b7a09c2a6a58e5afd04ed6090f2a03eb7c..f9dc6b6fc273217c99f07a3e78fe0212e89beef7 100644
--- a/python/paddle/distributed/auto_parallel/cost/base_cost.py
+++ b/python/paddle/distributed/auto_parallel/cost/base_cost.py
@@ -833,7 +833,7 @@ class CommOpCost(OpCost):
             if self.op_desc is not None:
                 self._group_ranks = self.op_desc["group_ranks"]
             elif self.op is not None:
-                ring_id = op.attrs("ring_id")
+                ring_id = self.op.attrs("ring_id")
                 process_group = get_process_group(ring_id)
                 if process_group is None:
                     raise ValueError(
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
index dcb85bf5966b412e82704596a026ee0db67827a6..b305d88d7df1b270b352b74c1d58198c2f47d25b 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py
@@ -23,6 +23,7 @@ from .dist_default import DistributedDefaultImpl0
 from ..cost import build_comp_desc_from_dist_op, build_comp_costs_from_descs
 from ..cost import Reshape2OpCost
 from ..cost import Reshape2GradOpCost
+from ..cost import build_dp_costs
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 
 
diff --git a/python/paddle/distributed/auto_parallel/planner.py b/python/paddle/distributed/auto_parallel/planner.py
index 8148392f0adb628e5acdae191c799a7a5ddcfb7c..15bf8058f739614335c966b92c00026da2d913bd 100755
--- a/python/paddle/distributed/auto_parallel/planner.py
+++ b/python/paddle/distributed/auto_parallel/planner.py
@@ -437,7 +437,7 @@ class SearchAlgorithm:
 
     @property
     def name(self):
-        self.name = name
+        self.name = self._name
 
     def search(self):
         raise NotImplementedError("Please Implement this method in subclass.")
diff --git a/python/paddle/distributed/fleet/base/orthogonal_strategy.py b/python/paddle/distributed/fleet/base/orthogonal_strategy.py
index d0fec2cfdb2d574a45b2e2c6c72118892f558b60..36af85d415d556709ca50a4be09f3d1d47cede47 100644
--- a/python/paddle/distributed/fleet/base/orthogonal_strategy.py
+++ b/python/paddle/distributed/fleet/base/orthogonal_strategy.py
@@ -129,7 +129,9 @@ class OrthogonalStrategy:
     def _check_valid_strategy(self):
         assert len(self._list_of_strategy_name) == len(
             set(self._list_of_strategy_name)
-        ), "Defined duplicated strategies: {}".format(list_of_strategy)
+        ), "Defined duplicated strategies: {}".format(
+            self._list_of_strategy_name
+        )
         num_of_ranks = functools.reduce(
             lambda x, y: x * y, self._list_of_degree
         )
diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py
index abf8f5f49fa1f7b911a7d377627dda0b06b2ceeb..297a2cf003ecb4e7d6a5609a6d152475504b6c98 100644
--- a/python/paddle/distributed/fleet/data_generator/data_generator.py
+++ b/python/paddle/distributed/fleet/data_generator/data_generator.py
@@ -342,9 +342,7 @@ class MultiSlotDataGenerator(DataGenerator):
                 for elem in elements:
                     if isinstance(elem, float):
                         self._proto_info[-1] = (name, "float")
-                    elif not isinstance(elem, int) and not isinstance(
-                        elem, long
-                    ):
+                    elif not isinstance(elem, int):
                         raise ValueError(
                             "the type of element%s must be in int or float"
                             % type(elem)
@@ -379,9 +377,7 @@ class MultiSlotDataGenerator(DataGenerator):
                     if self._proto_info[index][1] != "float":
                         if isinstance(elem, float):
                             self._proto_info[index] = (name, "float")
-                        elif not isinstance(elem, int) and not isinstance(
-                            elem, long
-                        ):
+                        elif not isinstance(elem, int):
                             raise ValueError(
                                 "the type of element%s must be in int or float"
                                 % type(elem)
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
index 07fc4e7172b9f00ded095df45a5bb47c171eebe4..83ba760c9e0a788a51133c49f7004f99053bc7d1 100644
--- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
+++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
@@ -18,6 +18,7 @@ from paddle.fluid import core
 from paddle.fluid.framework import _non_static_mode
 from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.framework import in_dygraph_mode
+from paddle.fluid.framework import _varbase_creator
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid.dygraph import layers
diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py
index 51765845507a62688f511c1d1fe3130546af3b21..98e06299789135fac41b9868e18febf146ea17b5 100644
--- a/python/paddle/distributed/fleet/runtime/the_one_ps.py
+++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py
@@ -902,7 +902,9 @@ class TheOnePSRuntime(RuntimeBase):
             heter_device_type = self.role_maker._heter_device_type().upper()
             if heter_device_type not in ["GPU", "XPU", "CPU"]:
                 raise ValueError(
-                    "Heter Worker Not Support Device {}".format(device_type)
+                    "Heter Worker Not Support Device {}".format(
+                        heter_device_type
+                    )
                 )
             if heter_device_type == "GPU":
                 executor = Executor(
diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py
index 667752e668a96f753a81013128344b77e101cbb2..fd2338e36cc25aa0d67348157a748094628dfaa6 100644
--- a/python/paddle/distributed/fleet/utils/fs.py
+++ b/python/paddle/distributed/fleet/utils/fs.py
@@ -19,6 +19,7 @@ import re
 import time
 import abc
 from paddle.fluid import core
+from .log_util import logger
 import functools
 import shutil
 
diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py
index a322bcdccfe6d7428d1719daebde031e3850183f..ef72263dd8758dda2b4b6ee6467982be42a4bc56 100644
--- a/python/paddle/distributed/launch/job/pod.py
+++ b/python/paddle/distributed/launch/job/pod.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 from .container import Container
 from .status import Status
 
@@ -27,8 +29,8 @@ class PodSepc:
         )
 
         # by controller
-        self._init_containers: List[Container] = []
-        self._containers: List[Container] = []
+        self._init_containers: list[Container] = []
+        self._containers: list[Container] = []
 
         # self.resource: Resource = None
         # self.status: Status = None
diff --git a/python/paddle/distributed/launch/utils/kv_client.py b/python/paddle/distributed/launch/utils/kv_client.py
index b60970382aee807bb5a26dab1f762e13bd9c0a53..e0482203357c72ff5d40d6024c4a44ecd7ad182b 100644
--- a/python/paddle/distributed/launch/utils/kv_client.py
+++ b/python/paddle/distributed/launch/utils/kv_client.py
@@ -79,7 +79,7 @@ class KVClient:
 
 
 if __name__ == '__main__':
-    cli = PKVClient("http://localhost:8090")
+    cli = KVClient("http://localhost:8090")
     data = {"/workers/1": "rank1", "/workers/2": "rank2"}
     for k, v in data.items():
         cli.put(k, v)
diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
index cbc9170a1e49c089c24d019d24fef0aebe5090be..601cd31948b3fc754ed6417fce9969f22f1212b6 100644
--- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
+++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
@@ -494,7 +494,9 @@ class DataParallelOptimizationPass(PassBase):
         for idx in sorted(remove_op_indices, reverse=True):
             assert (
                 block.ops[idx].type in remove_op_types
-            ), "Unexception: try to remove op {}".format(str(op))
+            ), "Unexception: try to remove op {}".format(
+                str(block.ops[idx])
+            )
             block._remove_op(idx)
 
         # insert coalecse op
diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py
index 522bc5f36c16361845b5f68f608cafddbfad06ea..b15edf62d1e553dc312f8bd4912eb944632e6f00 100755
--- a/python/paddle/distributed/ps/utils/public.py
+++ b/python/paddle/distributed/ps/utils/public.py
@@ -482,7 +482,7 @@ def get_geo_trainer_send_context(attrs):
     if attrs['ps_mode'] != DistributedMode.GEO:
         raise ValueError(
             "ps mode: {} not matched {}",
-            format(ps_mode, "get_geo_trainer_send_context"),
+            format(attrs['ps_mode'], "get_geo_trainer_send_context"),
         )
     send_ctx = {}
     trainer_id = get_role_id(attrs['role_maker'])
diff --git a/python/paddle/distribution/variable.py b/python/paddle/distribution/variable.py
index 99cafc5ea788ec29a3d09ba0d7ffba8bc89fe2e7..58d4a7521542eaf5c65cba355bab530ece0c54c6 100644
--- a/python/paddle/distribution/variable.py
+++ b/python/paddle/distribution/variable.py
@@ -14,6 +14,8 @@
 
 from paddle.distribution import constraint
 
+import paddle
+
 
 class Variable:
     """Random variable of probability distribution.
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
index ee73586e6e96a4a228af351b3867f789bee34770..ab873b4b3f09e7487ea97d93c9d81017706bfac1 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
@@ -27,7 +27,7 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
             program_config.ops[i].attrs for i in range(len(program_config.ops))
         ]
         if attrs[0]['axis'] == 0:
-            return false
+            return False
         ver = paddle_infer.get_trt_compile_version()
         if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8415:
             return False
diff --git a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
index a21ec9625f927873a5cfe6c3120288b37642ffb4..e19a0104de38d9d339874f2136808b0000612705 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py
@@ -22,6 +22,7 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import _non_static_mode
 from paddle import _legacy_C_ops
 from paddle.static import default_main_program
+from paddle.fluid.data_feeder import check_variable_and_dtype
 
 
 def dropout_nd(
diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index 0c52d7596c129e27864d9630ccb19983c82974e8..fb52745c7593da5bec3f26ffed56083f8e01f749 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -140,7 +140,7 @@ def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
     return np.mean(x, axis=axis, keepdims=keepdim)
 
 
-def ref_reduce_mean_grad(x, axis, dtype):
+def ref_reduce_mean_grad(x, axis, dtype, reduce_all):
     if reduce_all:
         axis = list(range(x.ndim))
 
@@ -191,7 +191,6 @@ class TestReduceMeanOp(OpTest):
         if self.dtype != 'float16':
            self.check_grad(['X'], ['Out'], check_eager=True)
         else:
-            return
            if not core.is_compiled_with_cuda():
                return
            place = paddle.CUDAPlace(0)
@@ -204,7 +203,10 @@ class TestReduceMeanOp(OpTest):
             )
             dx = paddle.grad(y, x)[0].numpy()
             dx_expected = ref_reduce_mean_grad(
-                self.inputs['X'], self.attrs['dim'], self.dtype
+                self.inputs['X'],
+                self.attrs['dim'],
+                self.dtype,
+                self.attrs['reduce_all'],
             )
             np.testing.assert_array_equal(dx, dx_expected)