未验证 提交 d424e5b4 编写于 作者: Y Yan Xu 提交者: GitHub

add a Python module to launch multi-process distributed jobs; test=develop (#15620)

* add a module to launch training in multi-process distributed mode; test=develop

* delete unused file test=develop

* refine usage test=develop

* refine usage test=develop

* move distributed package test=develop

* add to whl package test=develop
上级 381f2015
...@@ -25,4 +25,5 @@ import paddle.reader ...@@ -25,4 +25,5 @@ import paddle.reader
import paddle.dataset import paddle.dataset
import paddle.batch import paddle.batch
import paddle.compat import paddle.compat
import paddle.distributed
batch = batch.batch batch = batch.batch
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
...@@ -37,7 +37,7 @@ default_envs = { ...@@ -37,7 +37,7 @@ default_envs = {
GPUS = 8 GPUS = 8
def start_procs(gpus, cmd, log_dir): def start_procs(gpus, entrypoint, entrypoint_args, log_dir):
procs = [] procs = []
log_fns = [] log_fns = []
os.system("mkdir -p %s" % log_dir) os.system("mkdir -p %s" % log_dir)
...@@ -73,12 +73,11 @@ def start_procs(gpus, cmd, log_dir): ...@@ -73,12 +73,11 @@ def start_procs(gpus, cmd, log_dir):
"PADDLE_TRAINER_ENDPOINTS": all_nodes_devices_endpoints "PADDLE_TRAINER_ENDPOINTS": all_nodes_devices_endpoints
}) })
print("starting process ", i, cmd, curr_env) print("starting process ", i, entrypoint, entrypoint_args, curr_env)
fn = open("%s/workerlog.%d" % (log_dir, i), "w") fn = open("%s/workerlog.%d" % (log_dir, i), "w")
log_fns.append(fn) log_fns.append(fn)
procs.append( cmd = [sys.executable, "-u", entrypoint] + entrypoint_args
subprocess.Popen( procs.append(subprocess.Popen(cmd, stdout=fn, stderr=fn, env=curr_env))
cmd.strip().split(" "), stdout=fn, stderr=fn, env=curr_env))
for i in range(gpus): for i in range(gpus):
try: try:
...@@ -89,7 +88,8 @@ def start_procs(gpus, cmd, log_dir): ...@@ -89,7 +88,8 @@ def start_procs(gpus, cmd, log_dir):
pass pass
def main(): def parse_args():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='''start paddle training using multi-process mode. description='''start paddle training using multi-process mode.
NOTE: your train program ***must*** run as distributed nccl2 mode, NOTE: your train program ***must*** run as distributed nccl2 mode,
...@@ -108,21 +108,27 @@ POD_IP (current node ip address, not needed for local training) ...@@ -108,21 +108,27 @@ POD_IP (current node ip address, not needed for local training)
type=int, type=int,
default=8, default=8,
help='start number of processes for every gpu') help='start number of processes for every gpu')
parser.add_argument(
'--cmd',
type=str,
default="",
help='command to run for each process, e.g. python train.py --lr 0.1')
parser.add_argument( parser.add_argument(
'--log_dir', '--log_dir',
type=str, type=str,
default="mylog", default="mylog",
help='directory to put logs per process.') help='directory to put logs per process.')
args = parser.parse_args() parser.add_argument(
if args.cmd == "": 'entrypoint_script',
parser.print_help() type=str,
exit(0) help="The entrypoint script to be launched in parallel,"
start_procs(args.gpus, args.cmd, args.log_dir) "followed by all the arguments for each process,"
"e.g. train.py --lr 0.1")
parser.add_argument('entrypoint_args', nargs=argparse.REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# launch multiple training process
start_procs(args.gpus, args.entrypoint_script, args.entrypoint_args,
args.log_dir)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -161,7 +161,6 @@ def __bootstrap__(): ...@@ -161,7 +161,6 @@ def __bootstrap__():
'times_excess_than_required_tmp_allocation', 'times_excess_than_required_tmp_allocation',
'enable_inplace_whitelist' 'enable_inplace_whitelist'
] ]
core.init_gflags([sys.argv[0]] + core.init_gflags([sys.argv[0]] +
["--tryfromenv=" + ",".join(read_env_flags)]) ["--tryfromenv=" + ",".join(read_env_flags)])
core.init_glog(sys.argv[0]) core.init_glog(sys.argv[0])
......
...@@ -100,6 +100,7 @@ packages=['paddle', ...@@ -100,6 +100,7 @@ packages=['paddle',
'paddle.utils', 'paddle.utils',
'paddle.dataset', 'paddle.dataset',
'paddle.reader', 'paddle.reader',
'paddle.distributed',
'paddle.fluid', 'paddle.fluid',
'paddle.fluid.imperative', 'paddle.fluid.imperative',
'paddle.fluid.proto', 'paddle.fluid.proto',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册