Unverified commit e70af91d, authored by Yulong Ao, committed by GitHub

[Auto Parallel] Clear some fluid APIs (#49793)

* [Auto Parallel] Rename methods of ProcessMesh

* [Auto Parallel] Impl the python process_mesh by the c++ one

* [Auto Parallel] Add some minor modifications

* [Auto Parallel] Rename some methods

* [Auto Parallel] Remove unnecessary codes

* [Auto Parallel] Add back some removed files

* [Auto Parallel] Fix bugs

* [Auto Parallel] Fix a bug

* Update process_mesh.cc

* [Auto Parallel] Merge dist attrs of Python into C++

* [Auto Parallel] Add back deleted importing

* [Auto Parallel] Add back removed unittest

* [Auto Parallel] Remove type qualifiers of return types

* [Auto Parallel] Fix some bugs

* [Auto Parallel] Fix a bug of the quant pass

* [Auto Parallel] Fix the code style

* [Auto Parallel] Clear some fluid APIs
Parent 6a56bce7
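
The hunks below repeatedly replace fluid entry points with their public counterparts. As a quick reference, here is a minimal sketch of the new-style imports this commit moves to (an illustration drawn from the diffs themselves, not an exhaustive mapping; availability depends on the Paddle version):

    # Sketch of the fluid -> public API import migration seen in this commit.
    from paddle.common_ops_import import (        # was: paddle.fluid.data_feeder
        check_dtype,
        check_type,
        check_variable_and_dtype,
    )
    from paddle.framework import Block, core      # was: paddle.fluid.framework.Block / paddle.fluid.core
    from paddle.utils import unique_name          # was: paddle.fluid.unique_name
    from paddle.static import Operator, Variable  # was: paddle.fluid.framework.Operator / .Variable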
@@ -256,7 +256,7 @@ def add_to_collection(collection_name, value, name=None):
 def fetch(tensor, name=None, logging=False):
-    if isinstance(tensor, paddle.fluid.framework.Variable):
+    if isinstance(tensor, paddle.static.Variable):
         tensor = tensor.name
     elif isinstance(tensor, str):
         tensor = tensor
......
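
For context, this first hunk changes only the type check in fetch(): a static-graph handle is now tested against paddle.static.Variable rather than paddle.fluid.framework.Variable. A small, hedged illustration of the same pattern (resolve_fetch_name is a hypothetical helper for this example, not part of the Paddle API):

    import paddle

    def resolve_fetch_name(tensor):
        # Accept either a static-graph Variable or a plain tensor name.
        if isinstance(tensor, paddle.static.Variable):
            return tensor.name
        elif isinstance(tensor, str):
            return tensor
        raise TypeError("expected paddle.static.Variable or str")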
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.distributed.auto_parallel.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.utils import unique_name
......
@@ -14,12 +14,12 @@
 import copy
+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.distributed.auto_parallel.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.utils import unique_name
......
@@ -14,7 +14,7 @@
 import copy
-from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
+from paddle.common_ops_import import check_dtype, check_variable_and_dtype
 from paddle.framework import core
 from paddle.static import Operator
......
@@ -17,7 +17,7 @@ import copy
 import numpy as np
 import paddle
-from paddle.fluid import core
+from paddle.framework import core
 # Use to store the previous and current process mesh
 _g_previous_process_mesh = None
......
@@ -25,9 +25,7 @@ from paddle.distributed.auto_parallel.utils import (
 )
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 from paddle.distributed.passes.pass_base import PassBase, register_pass
-from paddle.fluid import unique_name
-from paddle.fluid.framework import Block
-from paddle.framework import core
+from paddle.framework import Block, core
 from paddle.static.amp.bf16 import AutoMixedPrecisionListsBF16
 from paddle.static.amp.bf16.amp_utils import (
     _dtype_to_str,
@@ -40,6 +38,7 @@ from paddle.static.amp.fp16_utils import (
     find_op_index,
     find_true_prev_op,
 )
+from paddle.utils import unique_name
 from ..auto_parallel.utils import is_backward_op, is_forward_op, is_loss_op
@@ -499,7 +498,7 @@ def _update_backward_cast_ops(params_grads, dist_context):
                 # add new op in the python and cpp at the same time
                 new_op_desc = main_block.desc.append_op()
                 new_op_desc.copy_from(op.desc)
-                new_op = paddle.fluid.framework.Operator(
+                new_op = paddle.static.Operator(
                     block=main_block,
                     desc=new_op_desc,
                     type=None,
......
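
The hunk above keeps the existing clone-the-op pattern and only swaps the wrapper class: the pass appends a raw C++ op desc, copies the source op into it, and wraps it with paddle.static.Operator instead of paddle.fluid.framework.Operator. A hedged sketch of that pattern follows (main_block and op come from the surrounding pass code; the trailing None arguments are an assumption about the collapsed part of the hunk):

    # Mirror an existing op into both the C++ desc list and the Python block.
    new_op_desc = main_block.desc.append_op()  # create an empty C++ op desc
    new_op_desc.copy_from(op.desc)             # copy type/inputs/outputs/attrs
    new_op = paddle.static.Operator(
        block=main_block,
        desc=new_op_desc,
        type=None,      # fields are already populated via copy_from
        inputs=None,
        outputs=None,
        attrs=None,
    )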
@@ -15,6 +15,7 @@
 from collections import defaultdict
 import paddle
+from paddle.common_ops_import import check_type, check_variable_and_dtype
 from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
 from paddle.distributed.auto_parallel.process_group import (
     get_world_process_group,
@@ -26,7 +27,6 @@ from paddle.distributed.auto_parallel.utils import (
     set_var_dist_attr,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
 from paddle.framework import core
 from paddle.static import default_main_program, default_startup_program
 from paddle.static.amp.fp16_utils import (
......