# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import argparse
import os
import sys
import signal
import time
import socket
from contextlib import closing
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_api_base_mlu import (
    TestCollectiveAPIRunnerBase,
    runtime_main,
)

paddle.enable_static()  # collective programs in this test are built in static graph mode


class TestCollectiveAllgatherAPI(TestCollectiveAPIRunnerBase):
    def __init__(self):
        self.global_ring_id = 0  # use the default (global) communication ring

    def get_model(self, main_prog, startup_program, rank):
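        """Build the static all_gather program for this rank and return the
        list of gathered tensors (one entry per rank)."""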
        with fluid.program_guard(main_prog, startup_program):
            tensor_list = []
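            # Static placeholder for this rank's local input; the leading -1
            # keeps the batch dimension dynamic.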
            tindata = paddle.static.data(
                name="tindata", shape=[-1, 10, 1000], dtype='float32'
            )
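            # all_gather fills tensor_list with one tensor per rank, in rank
            # order, including this rank's own tindata.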
            paddle.distributed.all_gather(tensor_list, tindata)
            return tensor_list


if __name__ == "__main__":
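    # Launched as a subprocess by the parent MLU collective test; the shared
    # runner reads the rank/endpoint configuration and executes the program.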
    runtime_main(TestCollectiveAllgatherAPI, "allgather")