diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py
index c2d79a62c7663a01d5cd1e7ca9ac705612e1db03..bceabeee3c3dce9f355bb9a31a037a13cca4edd3 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py
@@ -14,6 +14,7 @@ from __future__ import print_function
 
 import sys
+import paddle
 from paddle.optimizer import Optimizer
 from paddle.fluid.clip import ClipGradByGlobalNorm
 from ...utils.hybrid_parallel_util import fused_allreduce_gradients
@@ -22,6 +23,8 @@ from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid import framework
 from paddle.fluid.framework import Variable
 from ...utils.log_util import logger
+from paddle.fluid import core
+from paddle.fluid import layers
 
 __all__ = []
diff --git a/python/paddle/fluid/contrib/model_stat.py b/python/paddle/fluid/contrib/model_stat.py
index ca4bfac5ba5a14065af002b62f9987f5177fbd7a..11ab8800f287f415e4088ac47b4e4c48c066c4dd 100644
--- a/python/paddle/fluid/contrib/model_stat.py
+++ b/python/paddle/fluid/contrib/model_stat.py
@@ -150,6 +150,7 @@ def _format_summary(collected_ops_list):
     '''
     _verify_dependent_package()
 
+    from prettytable import PrettyTable
     summary_table = PrettyTable(
         ["No.", "TYPE", "INPUT", "OUTPUT", "PARAMs", "FLOPs"])
     summary_table.align = 'r'
diff --git a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py
index 0e4fd8f69dcd3fb5ecca5635c8b04df86d1e6bab..ea125ccf3fc6c09f3fff2a5ba97fff5ac279bab9 100644
--- a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py
+++ b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os
-
+import sys
 import six
 import unittest
 import time
diff --git a/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py b/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py
index 3f33120d1f79f089d7511621611141683f0a03cd..3faf7f6862058d056ee43f2603873a4fc834334d 100644
--- a/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py
+++ b/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py
@@ -268,7 +268,7 @@ class AutoCheckpointTest(AutoCheckPointACLBase):
     def test_checker(self):
         os.environ.pop("PADDLE_JOB_ID", None)
         try:
-            checker = AutoCheckpointChecker()
+            checker = acp.AutoCheckpointChecker()
             self.assertFalse(True)
         except Exception as e:
             pass
diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
index 84fee8ace3ec427e81b12d36f32562dd0ab8c954..1cf0c145f830dec2c3438fa22f34b7bdf3522875 100644
--- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py
@@ -333,7 +333,7 @@ class TestDynamicRNNErrors(unittest.TestCase):
                     hidden = fluid.layers.fc(input=[word, memory],
                                              size=10,
                                              act='tanh')
-                    out = np.ones(1).astype('float32')
+                    out = numpy.ones(1).astype('float32')
                     drnn.update_memory(ex_mem=memory, new_mem=hidden)
                     drnn.output(hidden, out)
diff --git a/python/paddle/fluid/tests/unittests/test_onnx_export.py b/python/paddle/fluid/tests/unittests/test_onnx_export.py
index 79d36063d77d5b128fdf3ba4a8a4fd711b226779..0985ed33af376c40e64ee93e636650a881970783 100644
--- a/python/paddle/fluid/tests/unittests/test_onnx_export.py
+++ b/python/paddle/fluid/tests/unittests/test_onnx_export.py
@@ -47,7 +47,7 @@ class TestExportWithTensor(unittest.TestCase):
         self.x_spec = paddle.static.InputSpec(
             shape=[None, 128], dtype='float32')
 
-    def test_with_tensor():
+    def test_with_tensor(self):
         model = LinearNet()
         paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])
diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 160d6c54759d901e2529221c99dce63b29f06810..40cba4f45d8ef2899ec3c4a66cdf217adc0ed205 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -163,7 +163,7 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint,
             })
     elif core.is_compiled_with_npu():
         hccl_id_var = block.create_var(
-            name=unique_name.generate('hccl_id'),
+            name=fluid.unique_name.generate('hccl_id'),
             persistable=True,
             type=core.VarDesc.VarType.RAW)
         endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)}
@@ -710,10 +710,10 @@ class DynamicGraphAdapter(object):
                     enable=self._amp_level != 'O0', **self._amp_custom_lists):
                 if self._nranks > 1:
                     outputs = self.ddp_model.forward(
-                        * [to_variable(x) for x in inputs])
+                        *[to_variable(x) for x in inputs])
                 else:
                     outputs = self.model.network.forward(
-                        * [to_variable(x) for x in inputs])
+                        *[to_variable(x) for x in inputs])
 
             losses = self.model._loss(*(to_list(outputs) + labels))
             losses = to_list(losses)
@@ -732,7 +732,7 @@ class DynamicGraphAdapter(object):
         metrics = []
         for metric in self.model._metrics:
             metric_outs = metric.compute(*(to_list(outputs) + labels))
-            m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
+            m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
             metrics.append(m)
 
         return ([to_numpy(l) for l in losses], metrics) \
@@ -746,7 +746,7 @@ class DynamicGraphAdapter(object):
         labels = labels or []
         labels = [to_variable(l) for l in to_list(labels)]
 
-        outputs = self.model.network.forward(* [to_variable(x) for x in inputs])
+        outputs = self.model.network.forward(*[to_variable(x) for x in inputs])
         if self.model._loss:
             losses = self.model._loss(*(to_list(outputs) + labels))
             losses = to_list(losses)
@@ -777,7 +777,7 @@ class DynamicGraphAdapter(object):
             self._merge_count[self.mode + '_batch'] = samples
 
             metric_outs = metric.compute(*(to_list(outputs) + labels))
-            m = metric.update(* [to_numpy(m) for m in to_list(metric_outs)])
+            m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)])
             metrics.append(m)
 
         if self.model._loss and len(metrics):
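
Note on the failure modes fixed above: most hunks bind or qualify a name that
was previously undefined at its use site (numpy vs. np in test_dyn_rnn.py,
fluid.unique_name vs. bare unique_name in hapi/model.py, acp.AutoCheckpointChecker
vs. bare AutoCheckpointChecker in test_auto_checkpoint.py, and a local
PrettyTable import in model_stat.py), while test_onnx_export.py fixes a test
method declared without self. The remaining hapi/model.py hunks only normalize
argument-unpacking spacing ("* [...]" to "*[...]"); both spellings are valid
Python. The sketch below is not part of the patch; it is a minimal standalone
reproduction of the missing-self bug, with hypothetical names
(MissingSelfDemo, test_value):

    import unittest

    class MissingSelfDemo(unittest.TestCase):
        def setUp(self):
            self.value = 42

        # Pre-fix shape, as in the old test_onnx_export.py:
        #     def test_value():
        # unittest still discovers such a method, but calling it on the test
        # instance passes that instance as an unexpected positional argument,
        # so the run errors with a TypeError before the body ever executes.
        def test_value(self):  # post-fix shape: accepts self
            self.assertEqual(self.value, 42)

    if __name__ == '__main__':
        unittest.main()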