# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np

import paddle
from paddle.fluid import core
from paddle.device import cuda
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph

class TestAsyncRead(unittest.TestCase):
    """Tests for ``core.async_read`` / ``core.eager.async_read``.

    The op copies data from a CUDA-pinned source tensor into a device
    destination on a dedicated CUDA stream.  Based on the assertions below,
    the destination layout is:

    * with empty ``offset``/``count``: the rows selected by ``index`` land at
      the front of ``dst``;
    * with non-empty ``offset``/``count``: the contiguous ``(offset, count)``
      chunks land first, followed by the indexed rows.
    """

    def func_setUp(self):
        """Build the tensors shared by the read tests.

        Creates a pinned-memory source, a device destination, a CPU int64
        gather index, a pinned staging buffer, and a CUDA stream.
        """
        # Empty int64 CPU tensor used as the "no offset / no count" argument.
        self.empty = paddle.to_tensor(
            np.array([], dtype="int64"), place=paddle.CPUPlace()
        )
        data = np.random.randn(100, 50, 50).astype("float32")
        # Source must live in CUDA pinned memory for the async copy.
        self.src = paddle.to_tensor(data, place=paddle.CUDAPinnedPlace())
        self.dst = paddle.empty(shape=[100, 50, 50], dtype="float32")
        # Index is an int64 tensor explicitly placed on CPU.
        self.index = paddle.to_tensor(
            np.array([1, 3, 5, 7, 9], dtype="int64")
        ).cpu()
        # Pinned staging buffer for the indexed rows.
        self.buffer = paddle.empty(
            shape=[50, 50, 50], dtype="float32"
        ).pin_memory()
        self.stream = cuda.Stream()

    def func_test_async_read_empty_offset_and_count(self):
        """With empty offset/count, only the indexed rows are copied and they
        end up at the front of ``dst``."""
        with cuda.stream_guard(self.stream):
            # Legacy dygraph and eager mode expose the op under different
            # entry points with the same signature.
            if _in_legacy_dygraph():
                core.async_read(
                    self.src,
                    self.dst,
                    self.index,
                    self.buffer,
                    self.empty,
                    self.empty,
                )
            else:
                core.eager.async_read(
                    self.src,
                    self.dst,
                    self.index,
                    self.buffer,
                    self.empty,
                    self.empty,
                )
        # Reference result: a plain gather of the same rows.
        expected = paddle.gather(self.src, self.index)
        actual = self.dst[: len(self.index)]
        np.testing.assert_allclose(
            expected.numpy(), actual.numpy(), rtol=1e-05
        )

    def func_test_async_read_success(self):
        """With offset/count given, the (offset, count) chunks are copied
        first, then the indexed rows immediately after them."""
        offset = paddle.to_tensor(
            np.array([10, 20], dtype="int64"), place=paddle.CPUPlace()
        )
        count = paddle.to_tensor(
            np.array([5, 10], dtype="int64"), place=paddle.CPUPlace()
        )
        with cuda.stream_guard(self.stream):
            if _in_legacy_dygraph():
                core.async_read(
                    self.src, self.dst, self.index, self.buffer, offset, count
                )
            else:
                core.eager.async_read(
                    self.src, self.dst, self.index, self.buffer, offset, count
                )

        # Indexed rows follow the offset/count chunks in dst.
        index_expected = paddle.gather(self.src, self.index)
        count_numel = paddle.sum(count).numpy()[0]
        index_actual = self.dst[count_numel : count_numel + len(self.index)]
        np.testing.assert_allclose(
            index_expected.numpy(), index_actual.numpy(), rtol=1e-05
        )

        # Offset/count chunks: src[10:15] and src[20:30] land at dst[:15].
        chunk_a = paddle.gather(self.src, paddle.to_tensor(np.arange(10, 15)))
        chunk_b = paddle.gather(self.src, paddle.to_tensor(np.arange(20, 30)))
        chunk_expected = paddle.concat([chunk_a, chunk_b], axis=0)
        chunk_actual = self.dst[:count_numel]
        np.testing.assert_allclose(
            chunk_expected.numpy(), chunk_actual.numpy(), rtol=1e-05
        )

    def func_test_async_read_only_1dim(self):
        """async_read also works on 1-D tensors."""
        src = paddle.rand([40], dtype="float32").pin_memory()
        dst = paddle.empty([40], dtype="float32")
        buffer_ = paddle.empty([20]).pin_memory()
        with cuda.stream_guard(self.stream):
            if _in_legacy_dygraph():
                core.async_read(
                    src, dst, self.index, buffer_, self.empty, self.empty
                )
            else:
                core.eager.async_read(
                    src, dst, self.index, buffer_, self.empty, self.empty
                )
        expected = paddle.gather(src, self.index)
        actual = dst[: len(self.index)]
        np.testing.assert_allclose(
            expected.numpy(), actual.numpy(), rtol=1e-05
        )

    def test_main(self):
        """Run every read test twice: once in eager mode, once in legacy
        dygraph mode.  setUp is re-run before each test because the ops
        mutate ``self.dst`` in place."""
        with _test_eager_guard():
            self.func_setUp()
            self.func_test_async_read_empty_offset_and_count()
            self.func_setUp()
            self.func_test_async_read_success()
            self.func_setUp()
            self.func_test_async_read_only_1dim()
        self.func_setUp()
        self.func_test_async_read_empty_offset_and_count()
        self.func_setUp()
        self.func_test_async_read_success()
        self.func_setUp()
        self.func_test_async_read_only_1dim()

class TestAsyncWrite(unittest.TestCase):
    """Tests for ``core.async_write`` / ``core.eager.async_write``.

    The op copies contiguous chunks of a device source tensor into a
    CUDA-pinned destination at the positions given by ``(offset, count)``
    pairs, on a dedicated CUDA stream.
    """

    def func_setUp(self):
        """Create the device source, pinned destination, and CUDA stream."""
        self.src = paddle.rand(shape=[100, 50, 50, 5], dtype="float32")
        # Destination is pinned host memory with extra room, so chunks can be
        # written at non-zero offsets.
        self.dst = paddle.empty(
            shape=[200, 50, 50, 5], dtype="float32"
        ).pin_memory()
        self.stream = cuda.Stream()

    def func_test_async_write_success(self):
        """Write src rows [0, 40) to dst[0:40] and rows [40, 100) to
        dst[60:120], then gather them back and compare with the source."""
        offset = paddle.to_tensor(
            np.array([0, 60], dtype="int64"), place=paddle.CPUPlace()
        )
        count = paddle.to_tensor(
            np.array([40, 60], dtype="int64"), place=paddle.CPUPlace()
        )
        with cuda.stream_guard(self.stream):
            # Same op, different entry point depending on dygraph mode.
            if _in_legacy_dygraph():
                core.async_write(self.src, self.dst, offset, count)
            else:
                core.eager.async_write(self.src, self.dst, offset, count)

        # Reassemble the two written chunks; together they must round-trip
        # the full source tensor.
        written_a = paddle.gather(
            self.dst, paddle.to_tensor(np.arange(0, 40))
        )
        written_b = paddle.gather(
            self.dst, paddle.to_tensor(np.arange(60, 120))
        )
        written = paddle.concat([written_a, written_b], axis=0)
        np.testing.assert_allclose(
            self.src.numpy(), written.numpy(), rtol=1e-05
        )

    def test_async_write_success(self):
        """Run the write test in eager mode, then in legacy dygraph mode,
        rebuilding the fixtures before each run."""
        with _test_eager_guard():
            self.func_setUp()
            self.func_test_async_write_success()
        self.func_setUp()
        self.func_test_async_write_success()

if __name__ == "__main__":
    # async_read/async_write require a CUDA build (pinned memory + streams);
    # on CPU-only builds the whole suite is skipped.
    if core.is_compiled_with_cuda():
        unittest.main()