diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py
index 1d19fda1b8307d3b4c6ccbd891f2c4fa8ee24839..3d11d8cab38b072284ea8d8ed121425a3aabec3e 100644
--- a/python/paddle/distributed/auto_parallel/interface.py
+++ b/python/paddle/distributed/auto_parallel/interface.py
@@ -256,7 +256,7 @@ def add_to_collection(collection_name, value, name=None):


 def fetch(tensor, name=None, logging=False):
-    if isinstance(tensor, paddle.fluid.framework.Variable):
+    if isinstance(tensor, paddle.static.Variable):
         tensor = tensor.name
     elif isinstance(tensor, str):
         tensor = tensor
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/operators/dist_embedding.py
index d92dd8f94d37ba45d0d8665ed67ae3c5be8baca2..b95bdb38b61295c2d666dfc7ee708706524ce8b8 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_embedding.py
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License

+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.distributed.auto_parallel.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.utils import unique_name

diff --git a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/operators/dist_matmul.py
index a89da6fd4e4a6688c95914f6973dae255ff61c27..62f8577ff3e87cfae9f0fb08db0b9c49852719de 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_matmul.py
@@ -14,12 +14,12 @@

 import copy

+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.distributed.auto_parallel.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.utils import unique_name
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py b/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py
index 228a8961a68300a0161bca217f05d2c7c86bcb35..518ba4814bfd79104f6fdf8d15df3c6ea6e2b8b7 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py
+++ b/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py
@@ -14,7 +14,7 @@

 import copy

-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.static import Operator
diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py
index a7786cbea0cbfa10e06734d53dc57a4dc315b427..34fecda5169e1eb31a1671be39e1446ea26afc3f 100644
--- a/python/paddle/distributed/auto_parallel/process_mesh.py
+++ b/python/paddle/distributed/auto_parallel/process_mesh.py
@@ -17,7 +17,7 @@ import copy
 import numpy as np

 import paddle
-from paddle.fluid import core
+from paddle.framework import core

 # Use to store the previous and current process mesh
 _g_previous_process_mesh = None
diff --git a/python/paddle/distributed/passes/auto_parallel_bf16.py b/python/paddle/distributed/passes/auto_parallel_bf16.py
index b733ab38ad742631cdcb61bb6c7095d3949abe88..a2fec1d44dde05bf1531c50882403241c3c52c6a 100644
--- a/python/paddle/distributed/passes/auto_parallel_bf16.py
+++ b/python/paddle/distributed/passes/auto_parallel_bf16.py
@@ -25,9 +25,7 @@ from paddle.distributed.auto_parallel.utils import (
 )
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 from paddle.distributed.passes.pass_base import PassBase, register_pass
-from paddle.fluid import unique_name
-from paddle.fluid.framework import Block
-from paddle.framework import core
+from paddle.framework import Block, core
 from paddle.static.amp.bf16 import AutoMixedPrecisionListsBF16
 from paddle.static.amp.bf16.amp_utils import (
     _dtype_to_str,
@@ -40,6 +38,7 @@ from paddle.static.amp.fp16_utils import (
     find_op_index,
     find_true_prev_op,
 )
+from paddle.utils import unique_name

 from ..auto_parallel.utils import is_backward_op, is_forward_op, is_loss_op

@@ -499,7 +498,7 @@ def _update_backward_cast_ops(params_grads, dist_context):
             # add new op in the python and cpp at the same time
             new_op_desc = main_block.desc.append_op()
             new_op_desc.copy_from(op.desc)
-            new_op = paddle.fluid.framework.Operator(
+            new_op = paddle.static.Operator(
                 block=main_block,
                 desc=new_op_desc,
                 type=None,
diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py
index 1d61100b72f775c70e819a56d9fc7c83c706a869..50307ce22f8428b08bc5413ededd95517b0dd16f 100644
--- a/python/paddle/distributed/passes/auto_parallel_fp16.py
+++ b/python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -15,6 +15,7 @@
 from collections import defaultdict

 import paddle
+from paddle.common_ops_import import check_type, check_variable_and_dtype
 from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
 from paddle.distributed.auto_parallel.process_group import (
     get_world_process_group,
@@ -26,7 +27,6 @@ from paddle.distributed.auto_parallel.utils import (
     set_var_dist_attr,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
 from paddle.framework import core
 from paddle.static import default_main_program, default_startup_program
 from paddle.static.amp.fp16_utils import (
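
A quick way to sanity-check this migration is to confirm that the new public import locations resolve to the expected symbols. The snippet below is an illustrative sketch, not part of the patch; it assumes a Paddle build recent enough to ship paddle.common_ops_import and the public paddle.framework / paddle.utils / paddle.static re-exports used above.

import paddle
from paddle.common_ops_import import (
    check_dtype,
    check_type,
    check_variable_and_dtype,
)
from paddle.framework import Block, core
from paddle.utils import unique_name

# The removed paddle.fluid paths pointed at these same helpers; the public
# modules should expose callable equivalents.
assert callable(check_dtype) and callable(check_type)
assert callable(check_variable_and_dtype)

# paddle.static.Variable / paddle.static.Operator replace
# paddle.fluid.framework.Variable / paddle.fluid.framework.Operator.
assert hasattr(paddle.static, "Variable")
assert hasattr(paddle.static, "Operator")

# unique_name now comes from paddle.utils instead of paddle.fluid.
assert callable(unique_name.generate)

print("public import locations resolve:", core.__name__, Block.__name__)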