collective_allgather_api.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import sys
import pickle

import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers

import test_collective_api_base as test_base

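# This test exercises paddle.distributed.all_gather in static-graph mode,
# so enable it before any program is built.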
paddle.enable_static()


class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase):

    def __init__(self):
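        # Ring 0 is the default global communication ring used by the
        # collective ops in this test.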
        self.global_ring_id = 0

    def get_model(self, main_prog, startup_program, rank, dtype=None):
        dtype = "float32" if dtype is None else dtype
        with fluid.program_guard(main_prog, startup_program):
            tensor_list = []
            tindata = layers.data(name="tindata", shape=[10, 1000], dtype=dtype)
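            # all_gather appends one tensor per participating rank to
            # tensor_list, so every rank ends up with all (10, 1000) inputs.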
            paddle.distributed.all_gather(tensor_list, tindata)
            return tensor_list

    def run_trainer(self, args):
        train_prog = fluid.Program()
        startup_prog = fluid.Program()
        endpoints = args["endpoints"].split(",")
        rank = args["trainerid"]
        current_endpoint = args["currentendpoint"]
        nranks = 2
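        # init_parallel_env sets up the communicator from the trainer
        # environment variables prepared by the launching test harness.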
        paddle.distributed.init_parallel_env()
        if args['backend'] == 'nccl':
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif args['backend'] == 'bkcl':
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            place = fluid.CPUPlace()
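        # Seed with the pid so each rank generates different input data,
        # which makes the gathered result meaningful to verify.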
        indata = test_base.create_test_data(shape=(10, 1000),
                                            dtype=args["dtype"],
                                            seed=os.getpid())
        assert args['static_mode'] == 1, \
            "collective_allgather_api only supports static mode"
        result = self.get_model(train_prog,
                                startup_prog,
                                rank,
                                dtype=args["dtype"])
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        fetch_list = [elem.name for elem in result]
        out = exe.run(train_prog,
                      feed={'tindata': indata},
                      fetch_list=fetch_list)
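        # Pickle the fetched tensors to stdout; the parent test process
        # reads and verifies them.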
        sys.stdout.buffer.write(pickle.dumps(out))


if __name__ == "__main__":
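    # runtime_main builds the args dict (trainerid, endpoints, backend,
    # dtype, static_mode, ...) consumed by run_trainer and invokes it for
    # this rank.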
    test_base.runtime_main(TestCollectiveAllgatherAPI, "allgather")