test_tensor.py 12.8 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 16
from __future__ import print_function

W
wopeizl 已提交
17
import paddle.fluid as fluid
18
import paddle.fluid.core as core
Y
Yu Yang 已提交
19 20
import unittest
import numpy
21
import numbers
Y
Yu Yang 已提交
22 23


24
class TestTensor(unittest.TestCase):
    def setUp(self):
        """Register every dtype the slice tests below exercise."""
        self.support_dtypes = [
            'bool',
            'uint8',
            'int8',
            'int16',
            'int32',
            'int64',
            'float16',
            'float32',
            'float64',
        ]

31
    def test_int_tensor(self):
        """Round-trip integer data through a CPU tensor via set()/numpy.array."""
        scope = core.Scope()
        var = scope.var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()
        tensor._set_dims([1000, 784])
        tensor._alloc_int(place)

        host_data = numpy.array(tensor)
        self.assertEqual((1000, 784), host_data.shape)
        # Poke two elements, write the buffer back, and re-read it.
        host_data[3, 9] = 1
        host_data[19, 11] = 2
        tensor.set(host_data, place)

        round_trip = numpy.array(tensor)
        self.assertEqual(1, round_trip[3, 9])
        self.assertEqual(2, round_trip[19, 11])
Y
Yu Yang 已提交
49

50
    def test_float_tensor(self):
        """Round-trip float data through a CPU tensor via set()/numpy.array."""
        scope = core.Scope()
        var = scope.var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()
        tensor._set_dims([1000, 784])
        tensor._alloc_float(place)

        host_data = numpy.array(tensor)
        self.assertEqual((1000, 784), host_data.shape)
        # Poke two elements, write the buffer back, and re-read it.
        host_data[3, 9] = 1.0
        host_data[19, 11] = 2.0
        tensor.set(host_data, place)

        round_trip = numpy.array(tensor)
        self.assertAlmostEqual(1.0, round_trip[3, 9])
        self.assertAlmostEqual(2.0, round_trip[19, 11])

Q
qingqing01 已提交
70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
    def test_int8_tensor(self):
        """Round-trip random int8 data through a tensor on CPU (and GPU if built).

        Fix: the original assertion was
        ``assertAlmostEqual(a.all(), b.all())``, which compares two boolean
        reductions rather than the arrays themselves and passes for almost
        any pair of arrays.  Compare the full contents with
        ``numpy.array_equal`` instead.
        """
        scope = core.Scope()
        var = scope.var("int8_tensor")
        cpu_tensor = var.get_tensor()
        tensor_array = numpy.random.randint(
            -127, high=128, size=[100, 200], dtype=numpy.int8)
        place = core.CPUPlace()
        cpu_tensor.set(tensor_array, place)
        cpu_tensor_array_2 = numpy.array(cpu_tensor)
        # Element-wise equality of the whole array, not .all() vs .all().
        self.assertTrue(numpy.array_equal(cpu_tensor_array_2, tensor_array))

        if core.is_compiled_with_cuda():
            cuda_tensor = var.get_tensor()
            tensor_array = numpy.random.randint(
                -127, high=128, size=[100, 200], dtype=numpy.int8)
            place = core.CUDAPlace(0)
            cuda_tensor.set(tensor_array, place)
            cuda_tensor_array_2 = numpy.array(cuda_tensor)
            self.assertTrue(
                numpy.array_equal(cuda_tensor_array_2, tensor_array))

91
    def test_int_lod_tensor(self):
        """Set data and recursive sequence lengths on an int LoD tensor.

        Fix: ``numpy.alltrue`` is deprecated (and removed in NumPy 2.0);
        use ``numpy.array_equal`` for the whole-array comparison.
        """
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.var("test_lod_tensor")
        lod_tensor = var_lod.get_tensor()

        lod_tensor._set_dims([4, 4, 6])
        lod_tensor._alloc_int(place)
        array = numpy.array(lod_tensor)
        array[0, 0, 0] = 3
        array[3, 3, 5] = 10
        lod_tensor.set(array, place)
        lod_tensor.set_recursive_sequence_lengths([[2, 2]])

        lod_v = numpy.array(lod_tensor)
        # numpy.alltrue was deprecated/removed; array_equal checks the same thing.
        self.assertTrue(numpy.array_equal(array, lod_v))

        lod = lod_tensor.recursive_sequence_lengths()
        self.assertEqual(2, lod[0][0])
        self.assertEqual(2, lod[0][1])

    def test_float_lod_tensor(self):
        """Set data first, then attach recursive sequence lengths afterwards."""
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.var("test_lod_tensor")

        lod_tensor = var_lod.get_tensor()
        lod_tensor._set_dims([5, 2, 3, 4])
        lod_tensor._alloc_float(place)

        host_data = numpy.array(lod_tensor)
        self.assertEqual((5, 2, 3, 4), host_data.shape)
        host_data[0, 0, 0, 0] = 1.0
        host_data[0, 0, 0, 1] = 2.0
        lod_tensor.set(host_data, place)

        round_trip = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, round_trip[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, round_trip[0, 0, 0, 1])
        # No LoD has been attached yet.
        self.assertEqual(len(lod_tensor.recursive_sequence_lengths()), 0)

        # Attach a two-level LoD and read it back.
        lod_py = [[2, 1], [1, 2, 2]]
        lod_tensor.set_recursive_sequence_lengths(lod_py)
        lod = lod_tensor.recursive_sequence_lengths()
        self.assertListEqual(lod_py, lod)

    def test_lod_tensor_init(self):
        """Construct a LoDTensor directly and set LoD before allocation."""
        place = core.CPUPlace()
        lod_py = [[2, 1], [1, 2, 2]]
        lod_tensor = core.LoDTensor()

        lod_tensor._set_dims([5, 2, 3, 4])
        lod_tensor.set_recursive_sequence_lengths(lod_py)
        lod_tensor._alloc_float(place)

        host_data = numpy.array(lod_tensor)
        host_data[0, 0, 0, 0] = 1.0
        host_data[0, 0, 0, 1] = 2.0
        lod_tensor.set(host_data, place)

        round_trip = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, round_trip[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, round_trip[0, 0, 0, 1])
        # The LoD set before allocation must survive the data round-trip.
        self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())
D
dzhwinter 已提交
154 155 156 157 158

    def test_lod_tensor_gpu_init(self):
        """Same as test_lod_tensor_init but allocated on a CUDA place."""
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        lod_py = [[2, 1], [1, 2, 2]]
        lod_tensor = core.LoDTensor()

        lod_tensor._set_dims([5, 2, 3, 4])
        lod_tensor.set_recursive_sequence_lengths(lod_py)
        lod_tensor._alloc_float(place)

        host_data = numpy.array(lod_tensor)
        host_data[0, 0, 0, 0] = 1.0
        host_data[0, 0, 0, 1] = 2.0
        lod_tensor.set(host_data, place)

        round_trip = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, round_trip[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, round_trip[0, 0, 0, 1])
        self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths())
174

Q
Qiao Longfei 已提交
175 176 177 178 179 180
    def test_empty_tensor(self):
        """A tensor with a zero-sized dim converts to an empty numpy array."""
        place = core.CPUPlace()
        scope = core.Scope()
        var = scope.var("test_tensor")

        tensor = var.get_tensor()
        tensor._set_dims([0, 1])
        tensor._alloc_float(place)

        host_data = numpy.array(tensor)
        self.assertEqual((0, 1), host_data.shape)

        if core.is_compiled_with_cuda():
            # Re-allocating on the GPU must preserve the empty shape too.
            gpu_place = core.CUDAPlace(0)
            tensor._alloc_float(gpu_place)
            host_data = numpy.array(tensor)
            self.assertEqual((0, 1), host_data.shape)

L
Leo Chen 已提交
193
    def run_slice_tensor(self, place, dtype):
        """Check that Tensor slicing agrees with numpy slicing for `dtype`."""
        tensor = fluid.Tensor()
        shape = [3, 3, 3]
        tensor._set_dims(shape)

        # Values 1..27 laid out as a 3x3x3 cube, same as the original literal.
        data = numpy.arange(1, 28).reshape(3, 3, 3).astype(dtype)
        tensor.set(data, place)

        # Each entry applies an identical subscript to the tensor and to the
        # reference numpy array; the results must match element-wise.
        slicers = [
            lambda t: t[1],
            lambda t: t[1:],
            lambda t: t[0:2:],
            lambda t: t[2::-2],
            lambda t: t[2::-2][0],
            lambda t: t[2:-1:-1],
            lambda t: t[0:, 0:],
            lambda t: t[0::1, 0::-1, 2:],
        ]
        for take in slicers:
            self.assertTrue(
                (numpy.array(take(tensor)) == numpy.array(take(data))).all())

L
Leo Chen 已提交
236 237 238 239 240
    def test_slice_tensor(self):
        """Run the slice checks for every supported dtype, CPU first then GPU."""
        for dtype in self.support_dtypes:
            # Always exercise the CPU path.
            self.run_slice_tensor(core.CPUPlace(), dtype)

            # Then the GPU path when this build has CUDA support.
            if core.is_compiled_with_cuda():
                self.run_slice_tensor(core.CUDAPlace(0), dtype)
W
wopeizl 已提交
245

246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
    def test_print_tensor(self):
        """print()/str() on a tensor must work on CPU and (if built) GPU."""
        scope = core.Scope()
        var = scope.var("test_tensor")
        place = core.CPUPlace()
        tensor = var.get_tensor()
        tensor._set_dims([10, 10])
        tensor._alloc_int(place)

        host_data = numpy.array(tensor)
        self.assertEqual((10, 10), host_data.shape)
        host_data[0, 0] = 1
        host_data[2, 2] = 2
        tensor.set(host_data, place)

        print(tensor)
        self.assertTrue(isinstance(str(tensor), str))

        if core.is_compiled_with_cuda():
            tensor.set(host_data, core.CUDAPlace(0))
            print(tensor)
            self.assertTrue(isinstance(str(tensor), str))

266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284
    # NOTE(review): method name is misspelled ("poiter" -> "pointer"); kept
    # as-is so external tooling that selects tests by name keeps working.
    def test_tensor_poiter(self):
        """_mutable_data must return an integral pointer on every place kind.

        Fix: the original assigned ``place = core.CPUPlace()`` twice in a
        row; the duplicate dead assignment is removed.
        """
        place = core.CPUPlace()
        scope = core.Scope()
        var = scope.var("test_tensor")
        tensor = var.get_tensor()
        dtype = core.VarDesc.VarType.FP32
        self.assertTrue(
            isinstance(tensor._mutable_data(place, dtype), numbers.Integral))

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.assertTrue(
                isinstance(
                    tensor._mutable_data(place, dtype), numbers.Integral))
            place = core.CUDAPinnedPlace()
            self.assertTrue(
                isinstance(
                    tensor._mutable_data(place, dtype), numbers.Integral))
            places = fluid.cuda_pinned_places()
            self.assertTrue(
                isinstance(
                    tensor._mutable_data(places[0], dtype), numbers.Integral))
289

290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308
    def test_tensor_set_fp16(self):
        """set() with a float16 array must keep dtype FP16 on every place."""
        array = numpy.random.random((300, 500)).astype("float16")
        tensor = fluid.Tensor()

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
            places.append(core.CUDAPinnedPlace())

        for place in places:
            tensor.set(array, place)
            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16)
            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))

L
Leo Chen 已提交
309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327
    def test_tensor_set_int16(self):
        """set() with an int16 array must keep dtype INT16 on every place."""
        array = numpy.random.randint(100, size=(300, 500)).astype("int16")
        tensor = fluid.Tensor()

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
            places.append(core.CUDAPinnedPlace())

        for place in places:
            tensor.set(array, place)
            self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16)
            self.assertTrue(numpy.array_equal(numpy.array(tensor), array))

328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
    def test_tensor_set_from_array_list(self):
        """set() with a list of arrays stacks them along a new leading dim."""
        array = numpy.random.randint(1000, size=(200, 300))
        list_array = [array, array]
        tensor = fluid.Tensor()

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
            places.append(core.CUDAPinnedPlace())

        for place in places:
            tensor.set(list_array, place)
            self.assertEqual([2, 200, 300], tensor.shape())
            self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array))

348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
    def test_tensor_set_error(self):
        """set() with non-numeric data must raise core.EnforceNotMet."""
        scope = core.Scope()
        var = scope.var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()

        caught = None
        try:
            error_array = ["1", "2"]
            tensor.set(error_array, place)
        except core.EnforceNotMet as ex:
            caught = ex

        self.assertIsNotNone(caught)

Y
Yu Yang 已提交
364 365 366

# Allow running this test file directly: `python test_tensor.py`.
if __name__ == '__main__':
    unittest.main()