Unverified · Commit 167e6488 authored by wangxiaoning, committed by GitHub

[Fluid clean]clean fluid.transpiler.details (#50564)

* move fluid.transpiler.details

* fix setup

* fix

* fix setup

* add setup
Parent 2364a6bc
@@ -12,6 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .program_utils import *
-from .ufind import *
-from .vars_distributed import *
+from .program_utils import (
+    delete_ops,
+    find_op_by_input_arg,
+    find_op_by_output_arg,
+)
+from .ufind import UnionFind
+from .vars_distributed import VarStruct, VarDistributed, VarsDistributed
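
Note: the wildcard imports are replaced with an explicit export list, so the package's public surface is now enumerable. As a minimal sketch (not part of the commit), downstream code consumes the relocated package like this:

    # Illustration only: these names are exactly the ones re-exported by the
    # cleaned __init__.py above, at the package's new location.
    from paddle.distributed.transpiler.details import (
        UnionFind,
        VarsDistributed,
        delete_ops,
        find_op_by_output_arg,
    )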
@@ -12,9 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from paddle.fluid import core
-import paddle
 def delete_ops(block, ops):
     for op in ops:
...
@@ -40,7 +40,7 @@ class UnionFind:
     def find(self, x):
         # Find the root index of given element x,
         # execute the path compress while findind the root index
-        if not x in self._index:
+        if x not in self._index:
             return -1
         idx = self._index[x]
         while idx != self._parents[idx]:
...
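
For context, the `find` touched here belongs to a union-find with path compression over optimizer ops; `if x not in ...` is simply the idiomatic spelling of the old `if not x in ...`. A self-contained sketch of the same structure (illustrative, not the PaddlePaddle class itself):

    class TinyUnionFind:
        # Minimal union-find with path compression, mirroring the shape of
        # the UnionFind being patched above (assumed, for illustration).
        def __init__(self, elements):
            self._index = {e: i for i, e in enumerate(elements)}
            self._parents = list(range(len(elements)))

        def find(self, x):
            if x not in self._index:  # the fix from the hunk above
                return -1
            idx = self._index[x]
            while idx != self._parents[idx]:
                # Path compression: hop each node up to its grandparent.
                self._parents[idx] = self._parents[self._parents[idx]]
                idx = self._parents[idx]
            return idx

        def union(self, a, b):
            ra, rb = self.find(a), self.find(b)
            if ra != rb and -1 not in (ra, rb):
                self._parents[ra] = rb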
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from paddle.fluid.framework import Variable
+from paddle.static import Variable
 class VarStruct:
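
`paddle.static.Variable` is the public alias for the class fluid exposed internally, so this swap should be behavior-preserving. A quick identity check (illustrative, assuming a Paddle build where both paths still resolve):

    import paddle
    from paddle.static import Variable

    # Expected to print True on builds where the alias is a re-export
    # (assumption for illustration, not asserted by the commit).
    print(Variable is paddle.fluid.framework.Variable)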
@@ -117,8 +117,6 @@ class VarDistributed:
     def __str__(self):
         origin_var_str = (
             "{name} : fluid.{type}.shape{shape}.astype({dtype})".format(
-                i="{",
-                e="}",
                 name=self.origin.name,
                 type=self.origin.type,
                 shape=self.origin.shape,
@@ -129,8 +127,6 @@ class VarDistributed:
         slice_var_str = (
             "{name} : fluid.{type}.shape{shape}.astype({dtype})"
             ".slice({is_slice}).block({block_id}).offset({offset})".format(
-                i="{",
-                e="}",
                 name=self.slice.name,
                 type=self.slice.type,
                 shape=self.slice.shape,
...
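
The deleted `i="{"` / `e="}"` arguments were dead code: `str.format` ignores keyword arguments the template never references, so the rendered strings are unchanged. A quick demonstration of that standard behavior:

    template = "{name} : fluid.{type}.shape{shape}.astype({dtype})"
    with_dead_args = template.format(
        i="{", e="}", name="w", type="LOD_TENSOR", shape=(4, 8), dtype="float32"
    )
    without_dead_args = template.format(
        name="w", type="LOD_TENSOR", shape=(4, 8), dtype="float32"
    )
    assert with_dead_args == without_dead_args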
@@ -27,7 +27,6 @@ from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import (
     RoundRobin,
     PSDispatcher,
 )
-from paddle.fluid.transpiler.details.program_utils import delete_ops

 OP_NAME_SCOPE = "op_namescope"
 CLIP_OP_NAME_SCOPE = "gradient_clip"
...
@@ -23,7 +23,7 @@ import paddle
 from paddle.framework import core
 import paddle.framework as framework
-from paddle.fluid.transpiler.details.program_utils import delete_ops
+from paddle.distributed.transpiler.details.program_utils import delete_ops
 from paddle.fluid.incubate.fleet.parameter_server.ir.public import (
     _get_optimize_ops,
 )
...
@@ -48,8 +48,6 @@ from ..framework import (
     Parameter,
     grad_var_name,
 )
-from .details import UnionFind, VarStruct, VarsDistributed
-from .details import delete_ops, find_op_by_output_arg

 LOOKUP_TABLE_TYPE = ["lookup_table", "lookup_table_v2"]
 LOOKUP_TABLE_GRAD_TYPE = ["lookup_table_grad", "lookup_table_v2_grad"]
@@ -619,6 +617,10 @@ class DistributeTranspiler:
         from paddle.distributed.distribute_lookup_table import (
             find_distributed_lookup_table,
         )
+        from paddle.distributed.transpiler.details import (
+            VarsDistributed,
+            find_op_by_output_arg,
+        )

         err_msg = """
@@ -1044,6 +1046,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
     def _fake_init_sparsetable(self, sparse_table_names):
         # delete table init op
+        from paddle.distributed.transpiler.details import delete_ops
+
         for table_name in sparse_table_names:
             table_var = self.startup_program.global_block().vars[table_name]
             table_param_init_op = []
@@ -1065,6 +1069,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
         delete_ops(self.startup_program.global_block(), table_param_init_op)

     def _delete_trainer_optimizer(self, is_startup):
+        from paddle.distributed.transpiler.details import delete_ops
+
         optimize_vars = []
         optimize_op_role_vars = []
         optimize_need_delete_vars = []
@@ -1132,6 +1138,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
         from paddle.distributed.fleet.base.private_helper_function import (
             wait_server_ready,
         )
+        from paddle.distributed.transpiler.details import delete_ops

         self._delete_trainer_optimizer(is_startup=True)
         sparse_table_names = self._get_sparse_table_names()
@@ -1720,6 +1727,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
     def _get_distributed_optimizer_vars(self):
         def _get_distributed_optimizer_var(endpoint):
+            from paddle.distributed.transpiler.details import VarStruct
+
             opt_op_on_pserver = []
             for _, op in enumerate(self.optimize_ops):
                 if self._is_optimizer_op(op) and self._is_opt_op_on_pserver(
@@ -1927,6 +1936,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
     def _replace_lookup_table_op_with_prefetch(
         self, program, pserver_endpoints
     ):
+        from paddle.distributed.transpiler.details import delete_ops
+
         # 1. replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op
         self.all_in_ids_vars = []
         self.all_prefetch_input_vars = []
@@ -2760,6 +2771,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
     def _create_ufind(self, optimize_ops):
         # Create a unit find data struct by optimize ops
+        from paddle.distributed.transpiler.details import UnionFind
+
         ufind = UnionFind(optimize_ops)
         for i in range(len(optimize_ops)):
             for j in range(i, len(optimize_ops)):
@@ -2884,6 +2897,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
         return lr_ops

     def _get_lr_ops_deprecated(self):
+        from paddle.distributed.transpiler.details import UnionFind
+
         lr_ops = []
         # find learning rate variables by optimize op
         lr_vars = set()
...
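
A pattern worth noting across these hunks: the module-level `from .details import ...` lines are removed, and each method instead imports `paddle.distributed.transpiler.details` locally at call time. Deferring the import this way is a standard defense against circular imports while `paddle.fluid` and `paddle.distributed` still reference each other during the migration. A minimal sketch of the shape:

    def _some_transpiler_method(self, block, ops):
        # Imported only when the method runs, after both packages have
        # finished initializing, which sidesteps import-cycle errors.
        from paddle.distributed.transpiler.details import delete_ops

        delete_ops(block, ops)

`_some_transpiler_method` is a made-up name; the import path is the one the commit actually introduces.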
@@ -37,8 +37,11 @@ from ..framework import (
     Block,
     Parameter,
 )
-from .details import wait_server_ready, VarsDistributed
-from .details import delete_ops
+from paddle.distributed.transpiler.details import (
+    wait_server_ready,
+    VarsDistributed,
+)
+from paddle.distributed.transpiler.details import delete_ops
 from .distribute_transpiler import (
     DistributeTranspiler,
     DistributeTranspilerConfig,
...
@@ -20,13 +20,13 @@ from functools import reduce
 import paddle
 import paddle.framework as framework
+from paddle.distributed.transpiler.details.program_utils import delete_ops
 from paddle.fluid.incubate.fleet.parameter_server.ir.public import (
     _get_lr_ops,
     _get_optimize_ops,
     get_sparse_tablenames,
 )
 from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
-from paddle.fluid.transpiler.details.program_utils import delete_ops
 from paddle.framework import core

 OP_NAME_SCOPE = "op_namescope"
...
@@ -383,6 +383,8 @@ packages=['paddle',
           'paddle.distributed.passes',
           'paddle.distributed.models',
           'paddle.distributed.models.moe',
+          'paddle.distributed.transpiler',
+          'paddle.distributed.transpiler.details',
           'paddle.framework',
           'paddle.jit',
           'paddle.jit.dy2static',
@@ -399,7 +401,6 @@ packages=['paddle',
           'paddle.fluid.contrib.extend_optimizer',
           'paddle.fluid.contrib.layers',
           'paddle.fluid.transpiler',
-          'paddle.fluid.transpiler.details',
           'paddle.fluid.incubate',
           'paddle.fluid.incubate.fleet',
           'paddle.fluid.incubate.checkpoint',
...
@@ -1270,6 +1270,8 @@ def get_setup_parameters():
         'paddle.distributed.passes',
         'paddle.distributed.models',
         'paddle.distributed.models.moe',
+        'paddle.distributed.transpiler',
+        'paddle.distributed.transpiler.details',
         'paddle.framework',
         'paddle.jit',
         'paddle.jit.dy2static',
@@ -1286,7 +1288,6 @@ def get_setup_parameters():
         'paddle.fluid.contrib.extend_optimizer',
         'paddle.fluid.contrib.layers',
         'paddle.fluid.transpiler',
-        'paddle.fluid.transpiler.details',
         'paddle.fluid.incubate',
         'paddle.fluid.incubate.fleet',
         'paddle.fluid.incubate.checkpoint',
...
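
setuptools installs only the packages named in `packages=[...]`, so the relocated subpackages have to be listed in both setup files; this is what the "fix setup" / "add setup" bullets in the commit message refer to. A post-install sanity check might look like:

    # Illustrative check that the relocated package actually ships:
    import importlib

    mod = importlib.import_module("paddle.distributed.transpiler.details")
    print(mod.__file__)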