# -*- coding: utf-8 -*-
import multiprocessing as mp
import platform
import queue

import numpy as np
import pytest

import megengine as mge
import megengine.distributed as dist
from megengine.core.ops.builtin import CollectiveComm, ParamPackConcat, ParamPackSplit
from megengine.device import get_default_device
from megengine.distributed.helper import param_pack_concat, param_pack_split


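# Small helpers for checking cross-process ordering through a multiprocessing
# queue: `_assert_q_empty` expects `q.get` to time out, `_assert_q_val` expects
# the next item in the queue to equal `val`.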
def _assert_q_empty(q):
    try:
        res = q.get(timeout=1)
    except Exception as e:
        assert isinstance(e, queue.Empty)
    else:
        assert False, "queue is not empty"


def _assert_q_val(q, val):
    ret = q.get()
    assert ret == val


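# Spawns `world_size` worker processes by hand and checks that, after
# `init_process_group`, each rank reports the expected rank, world size and
# backend, and that the py_server / mm_server addresses look sane.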
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("backend", ["nccl"])
@pytest.mark.isolated_distributed
def test_init_process_group(backend):
    world_size = 2
    server = dist.Server()
    port = server.py_server_port

    def worker(rank):
        dist.init_process_group("localhost", port, world_size, rank, rank, backend)
        assert dist.is_distributed()
        assert dist.get_rank() == rank
        assert dist.get_world_size() == world_size
        assert dist.get_backend() == backend

        py_server_addr = dist.get_py_server_addr()
        assert py_server_addr[0] == "localhost"
        assert py_server_addr[1] == port

        mm_server_addr = dist.get_mm_server_addr()
        assert mm_server_addr[0] == "localhost"
        assert mm_server_addr[1] > 0

        assert isinstance(dist.get_client(), dist.server._Client)

    procs = []
    for rank in range(world_size):
        p = mp.Process(target=worker, args=(rank,))
        p.start()
        procs.append(p)

    for p in procs:
        p.join(20)
        assert p.exitcode == 0


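# `new_group` with ranks [2, 0]: only the listed ranks join, the group key is
# the comma-joined rank list, and `group.rank` is the index within that list.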
@pytest.mark.require_ngpu(3)
@pytest.mark.isolated_distributed
def test_new_group():
    world_size = 3
    ranks = [2, 0]

    @dist.launcher
    def worker():
        rank = dist.get_rank()
        if rank in ranks:
            group = dist.new_group(ranks)
            assert group.size == 2
            assert group.key == "2,0"
            assert group.rank == ranks.index(rank)
            dt = get_default_device()[:-1]
            assert group.comp_node == "{}{}:2".format(dt, rank)

    worker()


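# Uses a shared queue to verify `group_barrier` ordering: rank 1 must not see
# rank 0's `q.put(0)` before the second barrier, and must see it afterwards.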
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_group_barrier():
    world_size = 2
    server = dist.Server()
    port = server.py_server_port

    def worker(rank, q):
        dist.init_process_group("localhost", port, world_size, rank, rank)
        dist.group_barrier()
        if rank == 0:
            dist.group_barrier()
            q.put(0)  # to be observed in rank 1
        else:
            _assert_q_empty(q)  # q.put(0) is not executed in rank 0
            dist.group_barrier()
            _assert_q_val(q, 0)  # q.put(0) executed in rank 0

    Q = mp.Queue()
    procs = []
    for rank in range(world_size):
        p = mp.Process(target=worker, args=(rank, Q))
        p.start()
        procs.append(p)

    for p in procs:
        p.join(20)
        assert p.exitcode == 0


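# `@dist.synchronized` ordering: rank 1 sees rank 0's `func(0, q)` output but
# not the following plain `q.put(2)` until its own `func(1, q)` has run, so the
# decorated call acts as a synchronization point between ranks.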
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_synchronized():
    world_size = 2
    server = dist.Server()
    port = server.py_server_port

    @dist.synchronized
    def func(rank, q):
        q.put(rank)

    def worker(rank, q):
        dist.init_process_group("localhost", port, world_size, rank, rank)
        dist.group_barrier()
        if rank == 0:
            func(0, q)  # q.put(0)
            q.put(2)
        else:
            _assert_q_val(q, 0)  # func executed in rank 0
            _assert_q_empty(q)  # q.put(2) is not executed
            func(1, q)
            _assert_q_val(
                q, 1
            )  # func in rank 1 executed earlier than q.put(2) in rank 0
            _assert_q_val(q, 2)  # q.put(2) executed in rank 0

    Q = mp.Queue()
    procs = []
    for rank in range(world_size):
        p = mp.Process(target=worker, args=(rank, Q))
        p.start()
        procs.append(p)

    for p in procs:
        p.join(20)
        assert p.exitcode == 0


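# Key/value store exposed by the distributed client: every rank sets the same
# key and reads it back.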
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_user_set_get():
    @dist.launcher
    def worker():
        # every rank sets the same key concurrently (a deliberate race)
        dist.get_client().user_set("foo", 1)
        # every rank reads it back; the value is identical no matter which set won
        ret = dist.get_client().user_get("foo")
        assert ret == 1

    worker()


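# The builtin op descriptors should compare equal by value and hash consistently.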
def test_oprmm_hashable():
    lhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
    rhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
    assert lhs == rhs
    assert hash(lhs) == hash(rhs)


def test_param_pack_split():
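    # `a` is a flat buffer of 10 ones; the offsets [0, 1, 1, 10] appear to be
    # (begin, end) pairs, so `b` takes elements [0:1] and `c` takes [1:10]
    # reshaped to (3, 3).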
    a = mge.Tensor(np.ones((10,), np.int32))
    b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
    assert np.allclose(b.numpy(), a.numpy()[1])
    assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))


def test_param_pack_concat():
    a = mge.Tensor(np.ones((1,), np.int32))
    b = mge.Tensor(np.ones((3, 3), np.int32))
    offsets_val = [0, 1, 1, 10]
    offsets = mge.Tensor(offsets_val, np.int32)
    c = param_pack_concat([a, b], offsets, offsets_val)
    assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())


@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("early_return", [False, True], ids=["common", "early_return"])
@pytest.mark.parametrize("output_size", [10, 10000], ids=["small_size", "large_size"])
@pytest.mark.isolated_distributed
def test_collect_results(early_return, output_size):
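    # `dist.launcher` collects the return value of every rank; a worker that
    # exits early contributes `None` to the result list.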
    @dist.launcher
    def worker():
        if early_return:
            exit(0)
        return [dist.get_rank()] * output_size

    results = worker()
    world_size = len(results)
    assert world_size > 0
    expects = (
        [None] * world_size
        if early_return
        else [[dev] * output_size for dev in range(world_size)]
    )
    assert results == expects


@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_user_set_pop():
    @dist.launcher
    def worker():
        # every rank sets the same key concurrently (a deliberate race)
        dist.get_client().user_set("foo", 1)
        if dist.get_rank() == 1:
            # rank 1 pops the key and should get back the stored value
            ret = dist.get_client().user_pop("foo")
            assert ret == 1

    worker()


@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_get_cuda_compute_capability():
    assert mge.device.get_cuda_compute_capability(0) > 0
    assert mge.device.get_cuda_compute_capability(1) > 0

    @dist.launcher
    def worker():
        # presumably just to initialize this rank's device before querying it
        x = mge.tensor([1.0])
        assert mge.device.get_cuda_compute_capability(dist.get_rank()) > 0

    worker()


@pytest.mark.require_ngpu(3)
@pytest.mark.isolated_distributed
def test_batch_send_recv():
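    # Batched point-to-point traffic inside a group_start()/group_end() block:
    # each rank sends its tensor to both neighbours and receives from both; the
    # value received from the previous rank on the last iteration (where the
    # payload is doubled) is checked at the end.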
    import megengine.distributed.functional as DF

    @dist.launcher(n_gpus=3)
    def worker():
        rank = dist.get_rank()
        dist.group_start()
        for i in range(3):
            tensor = mge.tensor(np.ones(10000)) * rank
            if i == 2:
                tensor *= i
            DF._remote_send_nobackward(tensor, (rank + 1) % 3)
            DF._remote_recv_nobackward(
                src_rank=(rank + 1) % 3, dtype="float32", shape=(10000,)
            )
            DF._remote_send_nobackward(tensor, (rank - 1) % 3)
            recv = DF._remote_recv_nobackward(
                src_rank=(rank - 1) % 3, dtype="float32", shape=(10000,)
            )
            if i == 2:
                recv2 = recv
        dist.group_end()
        np.testing.assert_equal(recv2.numpy(), (rank - 1) % 3 * 2 * np.ones(10000))

    worker()