Unverified · Commit a8b1d8d7 · authored by wangguanzhong, committed by GitHub

remove fluid (#1900)

Parent a0fb35bf
@@ -7,11 +7,8 @@ import socket
 import contextlib
 import numpy as np
-from paddle import fluid
 from paddle.io import BatchSampler
-from paddle.fluid.layers import collective
 from paddle.distributed import ParallelEnv
-from paddle.fluid.dygraph.parallel import ParallelStrategy

 _parallel_context_initialized = False
@@ -85,95 +82,3 @@ class DistributedBatchSampler(BatchSampler):
     def set_epoch(self, epoch):
         self.epoch = epoch
-
-
-def wait_server_ready(endpoints):
-    assert not isinstance(endpoints, six.string_types)
-    while True:
-        all_ok = True
-        not_ready_endpoints = []
-        for ep in endpoints:
-            ip_port = ep.split(":")
-            with contextlib.closing(
-                    socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
-                sock.settimeout(2)
-                result = sock.connect_ex((ip_port[0], int(ip_port[1])))
-                if result != 0:
-                    all_ok = False
-                    not_ready_endpoints.append(ep)
-        if not all_ok:
-            time.sleep(3)
-        else:
-            break
-
-
-def init_communicator(program, rank, nranks, wait_port, current_endpoint,
-                      endpoints):
-    if nranks < 2:
-        return
-    other_endpoints = endpoints[:]
-    other_endpoints.remove(current_endpoint)
-    if rank == 0 and wait_port:
-        wait_server_ready(other_endpoints)
-    block = program.global_block()
-    nccl_id_var = block.create_var(
-        name=fluid.unique_name.generate('nccl_id'),
-        persistable=True,
-        type=fluid.core.VarDesc.VarType.RAW)
-    block.append_op(
-        type='c_gen_nccl_id',
-        inputs={},
-        outputs={'Out': nccl_id_var},
-        attrs={
-            'rank': rank,
-            'endpoint': current_endpoint,
-            'other_endpoints': other_endpoints
-        })
-    block.append_op(
-        type='c_comm_init',
-        inputs={'X': nccl_id_var},
-        outputs={},
-        attrs={
-            'nranks': nranks,
-            'rank': rank,
-            'ring_id': 0,
-        })
-
-
-def prepare_distributed_context(place=None):
-    if place is None:
-        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
-            else fluid.CUDAPlace(0)
-
-    strategy = ParallelStrategy()
-    strategy.nranks = ParallelEnv().nranks
-    strategy.local_rank = ParallelEnv().local_rank
-    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
-    strategy.current_endpoint = ParallelEnv().current_endpoint
-
-    if strategy.nranks < 2:
-        return
-
-    global _parallel_context_initialized
-
-    if not _parallel_context_initialized and isinstance(place, fluid.CUDAPlace):
-
-        def _init_context():
-            communicator_prog = fluid.Program()
-            init_communicator(communicator_prog, strategy.local_rank,
-                              strategy.nranks, True, strategy.current_endpoint,
-                              strategy.trainer_endpoints)
-            exe = fluid.Executor(place)
-            exe.run(communicator_prog)
-
-        fluid.disable_dygraph()
-        _init_context()
-        fluid.enable_dygraph(place)
-    else:
-        assert ("Only support CUDAPlace for now.")
-
-    _parallel_context_initialized = True
-
-    return strategy
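Everything deleted in this hunk built an NCCL communicator by hand through a static fluid.Program. In Paddle 2.x dygraph the equivalent setup is normally a single paddle.distributed.init_parallel_env() call plus paddle.DataParallel. The sketch below shows that usual replacement as an assumption about the intended workflow; it is not code from this commit.

# Minimal sketch (assumed replacement, not part of this diff): Paddle 2.x dygraph
# multi-card setup that makes the deleted helpers unnecessary.
import paddle
import paddle.distributed as dist

def setup_data_parallel(model):
    # init_parallel_env() creates the NCCL communicator from the environment that
    # paddle.distributed.launch exports, replacing wait_server_ready/init_communicator.
    if dist.get_world_size() > 1:
        dist.init_parallel_env()
        model = paddle.DataParallel(model)  # gradients are all-reduced across cards
    return model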
@@ -6,7 +6,6 @@ import numpy as np
 import paddle
 import paddle.nn as nn
 from ppdet.core.workspace import register
-from ppdet.utils.data_structure import BufferDict

 __all__ = ['BaseArch']
......
 import numpy as np
-import paddle.fluid as fluid
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
@@ -29,20 +28,20 @@ class Anchor(object):
         rpn_delta_list = []
         anchor_list = []
         for (rpn_score, rpn_delta), (anchor, var) in zip(rpn_feats, anchors):
-            rpn_score = fluid.layers.transpose(rpn_score, perm=[0, 2, 3, 1])
-            rpn_delta = fluid.layers.transpose(rpn_delta, perm=[0, 2, 3, 1])
-            rpn_score = fluid.layers.reshape(x=rpn_score, shape=(0, -1, 1))
-            rpn_delta = fluid.layers.reshape(x=rpn_delta, shape=(0, -1, 4))
-            anchor = fluid.layers.reshape(anchor, shape=(-1, 4))
-            var = fluid.layers.reshape(var, shape=(-1, 4))
+            rpn_score = paddle.transpose(rpn_score, perm=[0, 2, 3, 1])
+            rpn_delta = paddle.transpose(rpn_delta, perm=[0, 2, 3, 1])
+            rpn_score = paddle.reshape(x=rpn_score, shape=(0, -1, 1))
+            rpn_delta = paddle.reshape(x=rpn_delta, shape=(0, -1, 4))
+            anchor = paddle.reshape(anchor, shape=(-1, 4))
+            var = paddle.reshape(var, shape=(-1, 4))
             rpn_score_list.append(rpn_score)
             rpn_delta_list.append(rpn_delta)
             anchor_list.append(anchor)
-        rpn_scores = fluid.layers.concat(rpn_score_list, axis=1)
-        rpn_deltas = fluid.layers.concat(rpn_delta_list, axis=1)
-        anchors = fluid.layers.concat(anchor_list)
+        rpn_scores = paddle.concat(rpn_score_list, axis=1)
+        rpn_deltas = paddle.concat(rpn_delta_list, axis=1)
+        anchors = paddle.concat(anchor_list)
         return rpn_scores, rpn_deltas, anchors

     def generate_loss_inputs(self, inputs, rpn_head_out, anchors):
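The mapping in this hunk is mechanical: fluid.layers.transpose/reshape/concat become paddle.transpose/reshape/concat with the same arguments. One detail worth noting is that a 0 in the target shape keeps the corresponding input dimension, the batch axis here. A standalone sketch with made-up shapes:

# Illustration only; the tensor shapes are invented, not taken from the model.
import paddle

rpn_score = paddle.rand([2, 3, 32, 32])                      # N, C (anchors), H, W
rpn_score = paddle.transpose(rpn_score, perm=[0, 2, 3, 1])   # -> N, H, W, C
rpn_score = paddle.reshape(rpn_score, shape=(0, -1, 1))      # 0 keeps the batch dim
print(rpn_score.shape)                                       # [2, 3072, 1]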
@@ -102,7 +101,7 @@ class Proposal(object):
         rpn_rois_num_list = []
         for (rpn_score, rpn_delta), (anchor, var) in zip(rpn_head_out,
                                                          anchor_out):
-            rpn_prob = fluid.layers.sigmoid(rpn_score)
+            rpn_prob = F.sigmoid(rpn_score)
             rpn_rois, rpn_rois_prob, rpn_rois_num, post_nms_top_n = self.proposal_generator(
                 scores=rpn_prob,
                 bbox_deltas=rpn_delta,
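Likewise, fluid.layers.sigmoid maps to paddle.nn.functional.sigmoid, imported as F above. A quick standalone check:

# Illustration only.
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.0, 1.0])
print(F.sigmoid(x).numpy())  # approximately [0.269, 0.5, 0.731]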
......
 import numpy as np
-import paddle.fluid as fluid
 from ppdet.core.workspace import register
......
@@ -14,13 +14,12 @@
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle import ParamAttr
 from paddle.nn import Layer
 from paddle.nn import Conv2D
 from paddle.nn.initializer import XavierUniform
-from paddle.fluid.regularizer import L2Decay
+from paddle.regularizer import L2Decay
 from ppdet.core.workspace import register, serializable
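paddle.regularizer.L2Decay is the 2.x home of the regularizer previously imported from paddle.fluid.regularizer, and it is still passed through ParamAttr. A hedged sketch with a hypothetical Conv2D, not taken from this file:

# Sketch of the migrated import in use; the layer and its numbers are placeholders.
import paddle
from paddle import ParamAttr
from paddle.nn import Conv2D
from paddle.nn.initializer import XavierUniform
from paddle.regularizer import L2Decay

conv = Conv2D(
    in_channels=256,
    out_channels=256,
    kernel_size=3,
    padding=1,
    weight_attr=ParamAttr(initializer=XavierUniform(fan_out=256 * 3 * 3)),
    bias_attr=ParamAttr(regularizer=L2Decay(0.0), learning_rate=2.0))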
......
@@ -23,7 +23,7 @@ import paddle
 import paddle.nn as nn
 import paddle.optimizer as optimizer
-import paddle.fluid.regularizer as regularizer
+import paddle.regularizer as regularizer
 from paddle import cos

 from ppdet.core.workspace import register, serializable
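On the optimizer side, the re-aliased paddle.regularizer module is typically passed as weight_decay, with the parameter list given explicitly in the 2.x optimizer API. A sketch with placeholder values, not the values used by this config:

# Sketch of the 2.x optimizer usage; model, learning rate and decay are placeholders.
import paddle
import paddle.optimizer as optimizer
import paddle.regularizer as regularizer

model = paddle.nn.Linear(16, 4)              # any Layer works here
opt = optimizer.Momentum(
    learning_rate=0.01,
    momentum=0.9,
    weight_decay=regularizer.L2Decay(1e-4),  # replaces the fluid-style regularizer
    parameters=model.parameters())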
......
@@ -19,8 +19,6 @@ from __future__ import print_function
 import logging
 import numpy as np
-import paddle.fluid as fluid
-
 __all__ = ["bbox_overlaps", "box_to_delta"]

 logger = logging.getLogger(__name__)
......
@@ -19,7 +19,6 @@ from __future__ import print_function
 import sys
 import paddle
-from paddle import fluid
 import logging
 import six
 import paddle.version as fluid_version
@@ -40,7 +39,7 @@ def check_gpu(use_gpu):
           "model on CPU"

     try:
-        if use_gpu and not fluid.is_compiled_with_cuda():
+        if use_gpu and not paddle.is_compiled_with_cuda():
             logger.error(err)
             sys.exit(1)
     except Exception as e:
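paddle.is_compiled_with_cuda() is the direct 2.x replacement for fluid.is_compiled_with_cuda(). A condensed, standalone version of the guard above, with the message text shortened:

# Simplified sketch of the same check.
import logging
import sys
import paddle

logger = logging.getLogger(__name__)

def check_gpu(use_gpu):
    if use_gpu and not paddle.is_compiled_with_cuda():
        logger.error("use_gpu is set, but this Paddle build has no CUDA support; "
                     "set use_gpu to false to run the model on CPU")
        sys.exit(1)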
......
@@ -23,7 +23,6 @@ import time
 import re
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 from .download import get_weights_path

 import logging
 logger = logging.getLogger(__name__)
......
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import os
-
-import paddle.fluid as fluid
-
-
-def nccl2_prepare(trainer_id, startup_prog, main_prog):
-    config = fluid.DistributeTranspilerConfig()
-    config.mode = "nccl2"
-
-    t = fluid.DistributeTranspiler(config=config)
-    t.transpile(
-        trainer_id,
-        trainers=os.environ.get('PADDLE_TRAINER_ENDPOINTS'),
-        current_endpoint=os.environ.get('PADDLE_CURRENT_ENDPOINT'),
-        startup_program=startup_prog,
-        program=main_prog)
-
-
-def prepare_for_multi_process(exe, build_strategy, startup_prog, main_prog):
-    trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
-    num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
-    if num_trainers < 2:
-        return
-
-    build_strategy.num_trainers = num_trainers
-    build_strategy.trainer_id = trainer_id
-    nccl2_prepare(trainer_id, startup_prog, main_prog)
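This deleted file wired NCCL2 up through the static-graph DistributeTranspiler. In Paddle 2.x the PADDLE_TRAINER_ID, PADDLE_TRAINERS_NUM and endpoint variables are still the contract, but they are exported by the launcher (python -m paddle.distributed.launch ...) and read through ParallelEnv, already imported in the sampler module above, so there is no transpile step left to write. A small sketch of reading that environment, offered as an assumption rather than code from this commit:

# Illustration of the environment that replaces the transpiler-based setup.
from paddle.distributed import ParallelEnv

env = ParallelEnv()
print("rank %d of %d on device %d" % (env.local_rank, env.nranks, env.dev_id))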
@@ -19,7 +19,6 @@ from __future__ import print_function
 import logging
 import numpy as np
 import cv2
-import paddle.fluid as fluid

 __all__ = ['nms']
......
@@ -24,8 +24,6 @@ from collections import OrderedDict
 import logging
 logger = logging.getLogger(__name__)
-import paddle.fluid as fluid
-
 __all__ = ['dump_infer_config', 'save_infer_model']

 # Global dictionary
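With the fluid import gone here, the dygraph export path usually goes through paddle.jit.to_static plus paddle.jit.save with an InputSpec. The following is a hedged sketch of that general flow with a placeholder model; it is not the actual save_infer_model implementation:

# Sketch only; the model, input shape and output path are placeholders.
import paddle
from paddle.static import InputSpec

model = paddle.nn.Linear(8, 2)  # stands in for a detection model
model.eval()
static_model = paddle.jit.to_static(
    model, input_spec=[InputSpec(shape=[None, 8], dtype='float32')])
paddle.jit.save(static_model, path="output/inference/model")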
......