#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.tests.unittests.op_test import (
    OpTest,
    convert_float_to_uint16,
    skip_check_grad_ci,
)


class ElementwiseMulOp(OpTest):
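    """Base case: float64 multiply of equal-shaped inputs, with output
    and gradient checks; subclasses override the init_* hooks to vary
    dtype, shapes, and the broadcast axis."""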
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_mul"
        self.dtype = np.float64
        self.axis = -1
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.outputs = {'Out': self.out}
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=(not self.use_mkldnn))

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad(['X', 'Y'], 'Out', check_dygraph=(not self.use_mkldnn))

    def test_check_grad_ignore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=(not self.use_mkldnn),
        )

    def test_check_grad_ignore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=(not self.use_mkldnn),
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.multiply(self.x, self.y)

    def init_dtype(self):
        pass

    def init_axis(self):
        pass


class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp):
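    """Both operands are 0-d tensors."""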
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.multiply(self.x, self.y)


class TestElementwiseMulOp_ZeroDim2(ElementwiseMulOp):
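    """0-d Y broadcast against a 2-D X."""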
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.multiply(self.x, self.y)


class TestElementwiseMulOp_ZeroDim3(ElementwiseMulOp):
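    """0-d X broadcast against a 2-D Y."""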
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.multiply(self.x, self.y)


class TestBF16ElementwiseMulOp(OpTest):
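    """bfloat16 case: float32 data is converted to uint16 storage with
    convert_float_to_uint16."""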
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.multiply(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ignore_x(self):
        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
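    """Y is a single-element tensor multiplied across all of X."""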
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(10, 3, 4).astype(np.float64),
            'Y': np.random.rand(1).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
        self.init_kernel_type()


class TestElementwiseMulOp_Vector(ElementwiseMulOp):
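    """Equal-length 1-D inputs; no broadcasting."""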
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.random((100,)).astype("float64"),
            'Y': np.random.random((100,)).astype("float64"),
        }
        self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
        self.init_kernel_type()


class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
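    """Y (100,) aligns with dim 0 of X (axis=0), i.e. y.reshape(100, 1, 1)."""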
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x * self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
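    """Y (100,) aligns with dim 1 of X (axis=1), i.e. y.reshape(1, 100, 1)."""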
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 100, 3).astype(np.float64),
            'Y': np.random.rand(100).astype(np.float64),
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
        }
        self.init_kernel_type()


class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
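    """Y (100,) aligns with the trailing dim of X (default axis=-1)."""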
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 100).astype(np.float64),
            'Y': np.random.rand(100).astype(np.float64),
        }

        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
        }
        self.init_kernel_type()


class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
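    """Y (10, 12) aligns with dims 1-2 of X (axis=1)."""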
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
            'Y': np.random.rand(10, 12).astype(np.float64),
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
        }
        self.init_kernel_type()


class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
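    """Y's size-1 middle dim broadcasts against X."""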
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(10, 2, 11).astype(np.float64),
            'Y': np.random.rand(10, 1, 11).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
        self.init_kernel_type()


class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
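    """Y's size-1 dim 2 broadcasts against X."""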
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(10, 4, 2, 3).astype(np.float64),
            'Y': np.random.rand(10, 4, 1, 3).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
        self.init_kernel_type()


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseMulOpFp16(ElementwiseMulOp):
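    """float16 variant of the base case; requires CUDA."""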
    def init_dtype(self):
        self.dtype = np.float16


class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
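    """Y's leading size-1 dims broadcast across X."""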
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 100).astype(np.float64),
            'Y': np.random.rand(1, 1, 100).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
        self.init_kernel_type()


class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
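    """Size-1 dims on both operands broadcast against each other."""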
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(30, 3, 1, 5).astype(np.float64),
            'Y': np.random.rand(30, 1, 4, 1).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
        self.init_kernel_type()


class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
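    """X has fewer dims than Y; axis=2 aligns X with dims 2-3 of Y."""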
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(10, 10).astype(np.float64),
            'Y': np.random.rand(2, 2, 10, 10).astype(np.float64),
        }

        self.attrs = {'axis': 2}

        self.outputs = {
            'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
        }
        self.init_kernel_type()


class TestComplexElementwiseMulOp(OpTest):
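    """Complex multiply with user-defined gradients via the conjugate rule:
    dX = dOut * conj(Y), dY = dOut * conj(X)."""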
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x * self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = self.grad_out * np.conj(self.y)
        self.grad_y = self.grad_out * np.conj(self.x)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseMulOp(TestComplexElementwiseMulOp):
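    """Real X times complex Y; X's gradient keeps only its real part."""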
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x * self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out * np.conj(self.y))
        self.grad_y = self.grad_out * np.conj(self.x)


class TestElementwiseMulop(unittest.TestCase):
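    """Dygraph interop: ndarray * Tensor and Tensor * ndarray match numpy."""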
    def func_dygraph_mul(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: np.ndarray * Tensor
        expect_out = np_a * np_b
        actual_out = np_a * tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: Tensor * np.ndarray
        actual_out = tensor_a * np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_mul(self):
        with _test_eager_guard():
            self.func_dygraph_mul()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()