#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle.fluid import core
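

# Illustrative sketch (not used by the tests below): the gradient convention
# for z = maximum(x, y). dz/dx is 1 where x > y and 0 where x < y, so the
# upstream gradient flows entirely to the larger operand. How ties are broken
# is framework-specific; routing them to x via ">=" here is an assumption.
def _reference_max_grad(x, y, dout):
    mask = x >= y  # True where x wins (ties included, by assumption)
    return dout * mask, dout * ~mask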


class TestElementwiseOp(OpTest):
    def init_data(self):
        # max(x, y) is not differentiable where x == y, so build y as
        # x + sgn * uniform(0.1, 1): every element of |x - y| is at least
        # 0.1, keeping the two inputs from being too close to each other.
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
            "float64"
        )

    def setUp(self):
        self.init_data()
        self.op_type = "elementwise_max"
        self.prim_op_type = "prim"
        self.enable_cinn = False
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.inputs = {'X': self.x, 'Y': self.y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        if hasattr(self, 'attrs'):
            self.check_output(check_dygraph=False)
        else:
            self.check_output()

    def test_check_grad_normal(self):
        if hasattr(self, 'attrs'):
            if self.attrs['axis'] == -1:
                self.check_grad(
                    ['X', 'Y'], 'Out', check_dygraph=False, check_prim=True
                )
            else:
                self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
        else:
            self.check_grad(['X', 'Y'], 'Out', check_prim=True)

    def test_check_grad_ignore_x(self):
        if hasattr(self, 'attrs') and self.attrs['axis'] != -1:
            self.check_grad(
                ['Y'],
                'Out',
                max_relative_error=0.005,
                no_grad_set=set("X"),
                check_dygraph=False,
            )
        else:
            self.check_grad(
                ['Y'],
                'Out',
                max_relative_error=0.005,
                no_grad_set=set("X"),
                check_prim=True,
            )

    def test_check_grad_ignore_y(self):
        if hasattr(self, 'attrs') and self.attrs['axis'] != -1:
            self.check_grad(
                ['X'],
                'Out',
                max_relative_error=0.005,
                no_grad_set=set('Y'),
                check_dygraph=False,
            )
        else:
            self.check_grad(
                ['X'],
                'Out',
                max_relative_error=0.005,
                no_grad_set=set('Y'),
                check_prim=True,
            )
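

# Sketch of why the sgn trick in init_data works (not run by the suite):
# |sgn * uniform(0.1, 1)| >= 0.1, so x and y never tie and the gradient of
# maximum() is well defined at every generated sample point.
def _demo_separation_trick():
    x = np.random.uniform(0.1, 1, [13, 17])
    sgn = np.random.choice([-1, 1], [13, 17])
    y = x + sgn * np.random.uniform(0.1, 1, [13, 17])
    # 0.099 instead of 0.1 only to allow for float rounding in x + sgn * d
    assert np.all(np.abs(x - y) >= 0.099)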


class TestElementwiseFP16Op(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
            np.float16
        )


class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, []).astype("float64")
        self.y = np.random.uniform(0.1, 1, []).astype("float64")


class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, []).astype("float16")
        self.y = np.random.uniform(0.1, 1, []).astype("float16")


class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        self.y = np.random.uniform(0.1, 1, []).astype("float64")


class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
        self.y = np.random.uniform(0.1, 1, []).astype("float16")


class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, []).astype("float64")
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")


class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, []).astype("float16")
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
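

# Usage sketch for the zero-dim cases above (illustration only, assuming a
# paddle version where Python scalars become 0-d tensors): a 0-d tensor
# broadcasts against any shape.
def _demo_zero_dim_broadcast():
    x = paddle.rand([13, 17])
    y = paddle.to_tensor(0.5)  # 0-d tensor
    return paddle.maximum(x, y)  # shape [13, 17]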


@unittest.skipIf(
    core.is_compiled_with_cuda()
    and (
        core.cudnn_version() < 8100
        or paddle.device.cuda.get_device_capability()[0] < 8
    ),
    "run this test only when a GPU is available, cuDNN is at least 8.1.0, and the GPU's compute capability is at least 8.0.",
)
class TestElementwiseBF16Op(OpTest):
    def init_data(self):
        # max(x, y) is not differentiable where x == y, so build y as
        # x + sgn * uniform(0.1, 1): every element of |x - y| is at least
        # 0.1, keeping the two inputs from being too close to each other.
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
            np.float32
        )

    def setUp(self):
        self.init_data()
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        self.enable_cinn = False
        self.dtype = np.uint16
        self.inputs = {
            'X': convert_float_to_uint16(self.x),
            'Y': convert_float_to_uint16(self.y),
        }
        self.outputs = {
            'Out': convert_float_to_uint16(np.maximum(self.x, self.y))
        }

    def test_check_output(self):
        if hasattr(self, 'attrs'):
            self.check_output(check_dygraph=False)
        else:
            self.check_output()

    def test_check_grad_normal(self):
        if hasattr(self, 'attrs'):
            # check_prim is left off here: the prim decomposition relies on
            # `less_equal`, which does not support bfloat16.
            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
        else:
            self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ignore_x(self):
        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
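

# Conceptual sketch of convert_float_to_uint16: bfloat16 keeps the top 16
# bits of an IEEE float32, and OpTest stores those bits as uint16. This
# truncating variant is an illustration only; the real helper may round to
# nearest instead of truncating.
def _float32_to_bfloat16_bits(a):
    return (np.asarray(a, np.float32).view(np.uint32) >> 16).astype(np.uint16)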


class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
    def init_data(self):
        self.x = np.random.uniform(0.1, 1, []).astype("float32")
        self.y = np.random.uniform(0.1, 1, []).astype("float32")

    def test_check_grad_normal(self):
        if hasattr(self, 'attrs'):
            self.check_grad(
                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_dygraph=False
            )
        else:
            self.check_grad(['X', 'Y'], 'Out', numeric_grad_delta=0.05)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
        )


class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
    def init_data(self):
        # randint's upper bound is exclusive; -5..5 inclusive matches the
        # removed np.random.random_integers(-5, 5, ...) call.
        self.x = np.random.randint(-5, 6, [2, 3, 20]).astype("float32")
        self.y = np.array([0.5]).astype("float32")
        self.__class__.no_need_check_grad = True


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.randint(-5, 6, [2, 3, 20]).astype("float64")
        self.y = np.array([0.5]).astype("float64")


class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
    def init_data(self):
        self.x = np.random.randint(-5, 6, [2, 3, 20]).astype("float16")
        self.y = np.array([0.5]).astype("float16")
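

# The scalar cases above rely on NumPy-style broadcast of a shape-(1,) Y
# against a full-rank X (sketch, not run by the suite):
def _demo_scalar_broadcast():
    x = np.random.randint(-5, 6, [2, 3, 20]).astype("float64")
    y = np.array([0.5])
    return np.maximum(x, y)  # y broadcasts across all 120 elements of x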


class TestElementwiseMaxOp_Vector(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.random((100,)).astype("float64")
        sgn = np.random.choice([-1, 1], (100,)).astype("float64")
        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
            "float64"
        )


class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
    def init_data(self):
        self.x = np.random.random((100,)).astype("float16")
        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
            "float16"
        )


class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
    def init_data(self):
        self.x = np.random.random((100,)).astype("float32")
        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
            "float32"
        )


class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
            )
        }
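

# Reference sketch for the broadcast cases (illustration only): with
# attrs={'axis': k}, Y's dimensions align with X's starting at dim k, and
# the NumPy reference reproduces the op by padding Y with trailing ones.
def _demo_axis0_broadcast():
    x = np.random.uniform(0.5, 1, (100, 5, 2))
    y = np.random.uniform(1, 2, (100,))
    # axis=0: y (100,) aligns with dim 0 of x -> view y as (100, 1, 1)
    return np.maximum(x, y.reshape(100, 1, 1))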


class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float16
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
            )
        }


class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
            )
        }


class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float16
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
            )
        }


class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
            )
        }


class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float16
        )
        self.inputs = {'X': x, 'Y': y}

        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
            )
        }


class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
            )
        }
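

# Companion sketch for broadcast_3 above (illustration only): with axis=1,
# a (50, 2) Y aligns with dims 1..2 of a (2, 50, 2, 1) X, i.e. it is viewed
# as (1, 50, 2, 1) before the elementwise maximum.
def _demo_axis1_matrix_broadcast():
    x = np.random.uniform(0.5, 1, (2, 50, 2, 1))
    y = np.random.uniform(1, 2, (50, 2))
    return np.maximum(x, y.reshape(1, 50, 2, 1))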


class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
            np.float16
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': np.maximum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
            )
        }


class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64)
        self.inputs = {'X': x, 'Y': y}

        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        self.python_api = paddle.maximum
        self.public_python_api = paddle.maximum
        self.prim_op_type = "prim"
        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


if __name__ == '__main__':
    unittest.main()