#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from decorator_helper import prog_scope
import paddle
import paddle.fluid as fluid
import numpy
import numpy as np


class TestMathOpPatches(unittest.TestCase):
    """Static-graph tests for the Python operator overloads on Variable.

    Each test builds a small program using an overloaded operator
    (``+ - * / == ~ & | ^ @`` or an attribute such as ``T``/``ndim``),
    runs it on CPU with an Executor, and checks the fetched result
    against the equivalent numpy computation.
    """

    def setUp(self):
        # The operator patches under test apply to static-graph Variables;
        # dygraph is Paddle's default mode, so switch explicitly.
        paddle.enable_static()

    @prog_scope()
    def test_add_scalar(self):
        """Variable + scalar (__add__) and Variable + Variable."""
        a = fluid.layers.data(name="a", shape=[1])
        b = a + 10
        ab = fluid.layers.concat(input=[a, b], axis=1)
        c = ab + 10
        d = ab + a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, c_np, d_np = exe.run(fluid.default_main_program(),
                                   feed={"a": a_np},
                                   fetch_list=[b, c, d])
        np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)
        ab_np = np.concatenate([a_np, b_np], axis=1)
        np.testing.assert_allclose(ab_np + 10, c_np, rtol=1e-05)
        # d = ab + a broadcasts a over ab's second axis.
        d_expected = ab_np + np.concatenate([a_np, a_np], axis=1)
        np.testing.assert_allclose(d_expected, d_np, rtol=1e-05)

    @prog_scope()
    def test_radd_scalar(self):
        """scalar + Variable (__radd__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 + a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)

    @prog_scope()
    def test_sub_scalar(self):
        """Variable - scalar (__sub__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = a - 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05)

    @prog_scope()
    def test_rsub_scalar(self):
        """scalar - Variable (__rsub__).

        NOTE: this was previously (mis)named ``test_radd_scalar``, which
        duplicated the method above and shadowed it, so the real
        ``__radd__`` test never ran.
        """
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 - a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(10 - a_np, b_np, rtol=1e-05)

    @prog_scope()
    def test_mul_scalar(self):
        """Variable * scalar (__mul__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = a * 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(a_np * 10, b_np, rtol=1e-05)

    @prog_scope()
    def test_rmul_scalar(self):
        """scalar * Variable (__rmul__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 * a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(10 * a_np, b_np, rtol=1e-05)

    @prog_scope()
    def test_div_scalar(self):
        """Variable / scalar (__div__/__truediv__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = a / 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(a_np / 10, b_np, rtol=1e-05)

    @prog_scope()
    def test_rdiv_scalar(self):
        """scalar / Variable (__rdiv__/__rtruediv__)."""
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 / a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        # Offset away from zero so the division is numerically safe.
        a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(10 / a_np, b_np, rtol=1e-05)

    @prog_scope()
    def test_div_two_tensor(self):
        """Variable / Variable."""
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a / b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        # Offset the divisor away from zero.
        b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2
        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
        np.testing.assert_allclose(a_np / b_np, c_np, rtol=1e-05)

    @prog_scope()
    def test_mul_two_tensor(self):
        """Variable * Variable."""
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a * b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np = np.random.random(size=[10, 1]).astype('float32')
        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
        np.testing.assert_allclose(a_np * b_np, c_np, rtol=1e-05)

    @prog_scope()
    def test_add_two_tensor(self):
        """Variable + Variable."""
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a + b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np = np.random.random(size=[10, 1]).astype('float32')
        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
        np.testing.assert_allclose(a_np + b_np, c_np, rtol=1e-05)

    @prog_scope()
    def test_sub_two_tensor(self):
        """Variable - Variable."""
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a - b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.random(size=[10, 1]).astype('float32')
        b_np = np.random.random(size=[10, 1]).astype('float32')
        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
        np.testing.assert_allclose(a_np - b_np, c_np, rtol=1e-05)

    @prog_scope()
    def test_integer_div(self):
        """int64 Variable / scalar performs true (float) division."""
        a = fluid.layers.data(name="a", shape=[1], dtype='int64')
        b = a / 7
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.array([3, 4, 10, 14, 9, 18]).astype('int64')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        # `/` on integer tensors yields float results, mirroring Python 3.
        b_np_actual = (a_np / 7).astype('float32')
        np.testing.assert_allclose(b_np, b_np_actual, rtol=1e-05)

    @prog_scope()
    def test_equal(self):
        """Variable == Variable (__eq__) produces a BOOL tensor."""
        a = fluid.layers.data(name="a", shape=[1], dtype='float32')
        b = fluid.layers.data(name="b", shape=[1], dtype='float32')
        c = (a == b)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
        b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32')

        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            "b": b_np
                        },
                        fetch_list=[c])

        np.testing.assert_array_equal(c_np, a_np == b_np)
        self.assertEqual(c.dtype, fluid.core.VarDesc.VarType.BOOL)

    @prog_scope()
    def test_equal_and_cond(self):
        """__eq__ result usable as the predicate of layers.cond."""
        a = fluid.layers.data(name="a", shape=[1], dtype='float32')
        b = fluid.layers.data(name="b", shape=[1], dtype='float32')

        one = fluid.layers.ones(shape=[1], dtype='int32')
        zero = fluid.layers.zeros(shape=[1], dtype='int32')
        cond = (one == zero)
        # one != zero, so the false branch (a - b) is taken.
        c = fluid.layers.cond(cond, lambda: a + b, lambda: a - b)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float')
        b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float')
        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            "b": b_np
                        },
                        fetch_list=[c])

        np.testing.assert_array_equal(c_np, a_np - b_np)

    @prog_scope()
    def test_neg(self):
        """-Variable (__neg__)."""
        a = fluid.layers.data(name="a", shape=[10, 1])
        b = -a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(-a_np, b_np, rtol=1e-05)

    @prog_scope()
    def test_astype(self):
        """Variable.astype casts the tensor dtype."""
        a = fluid.layers.data(name="a", shape=[10, 1])
        b = a.astype('float32')
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64')
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
        np.testing.assert_allclose(a_np.astype('float32'), b_np, rtol=1e-05)

    @prog_scope()
    def test_bitwise_and(self):
        """Variable & Variable (__and__)."""
        # NOTE: @prog_scope() added for consistency with the other
        # bitwise tests; without it this test built its graph in the
        # shared default program.
        x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        out_np = x_np & y_np

        x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
        y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
        z = x & y

        exe = fluid.Executor()
        out = exe.run(fluid.default_main_program(),
                      feed={
                          "x": x_np,
                          "y": y_np
                      },
                      fetch_list=[z])
        np.testing.assert_array_equal(out[0], out_np)

    @prog_scope()
    def test_bitwise_or(self):
        """Variable | Variable (__or__)."""
        x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        out_np = x_np | y_np

        x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
        y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
        z = x | y

        exe = fluid.Executor()
        out = exe.run(fluid.default_main_program(),
                      feed={
                          "x": x_np,
                          "y": y_np
                      },
                      fetch_list=[z])
        np.testing.assert_array_equal(out[0], out_np)

    @prog_scope()
    def test_bitwise_xor(self):
        """Variable ^ Variable (__xor__)."""
        x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        out_np = x_np ^ y_np

        x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
        y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
        z = x ^ y

        exe = fluid.Executor()
        out = exe.run(fluid.default_main_program(),
                      feed={
                          "x": x_np,
                          "y": y_np
                      },
                      fetch_list=[z])
        np.testing.assert_array_equal(out[0], out_np)

    @prog_scope()
    def test_bitwise_not(self):
        """~Variable (__invert__)."""
        x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
        out_np = ~x_np

        x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
        z = ~x

        exe = fluid.Executor()
        out = exe.run(fluid.default_main_program(),
                      feed={"x": x_np},
                      fetch_list=[z])
        np.testing.assert_array_equal(out[0], out_np)

    @prog_scope()
    def test_T(self):
        """Variable.T reverses all axes, like numpy's .T."""
        x_np = np.random.randint(-100, 100, [2, 8, 5, 3]).astype("int32")
        out_np = x_np.T

        x = paddle.static.data(name="x", shape=[2, 8, 5, 3], dtype="int32")
        z = x.T

        exe = fluid.Executor()
        out = exe.run(fluid.default_main_program(),
                      feed={"x": x_np},
                      fetch_list=[z])
        np.testing.assert_array_equal(out[0], out_np)

    @prog_scope()
    def test_ndim(self):
        """dim(), ndimension() and ndim all report the tensor rank."""
        a = paddle.static.data(name="a", shape=[10, 1])
        self.assertEqual(a.dim(), 2)
        self.assertEqual(a.ndimension(), 2)
        self.assertEqual(a.ndim, 2)

    @prog_scope()
    def test_matmul(self):
        """Variable @ Variable (__matmul__)."""
        a = paddle.static.data(name='a', shape=[2, 3], dtype='float32')
        b = paddle.static.data(name='b', shape=[3, 5], dtype='float32')
        c = a @ b  # __matmul__
        a_np = np.random.uniform(-1, 1, size=[2, 3]).astype('float32')
        b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32')
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        c_np, = exe.run(paddle.static.default_main_program(),
                        feed={
                            "a": a_np,
                            "b": b_np
                        },
                        fetch_list=[c])
        np.testing.assert_allclose(a_np @ b_np, c_np, rtol=1e-05)
# Allow running this suite directly: python test_math_op_patch.py
if __name__ == '__main__':
    unittest.main()