#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core

paddle.enable_static()


class TestElementwiseOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        # min() is not differentiable where x == y, so build y from x with a
        # sign-flipped offset of at least 0.1 to keep the two well separated.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
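        # paddle.minimum (the eager python_api) takes no `axis` argument, so
        # eager checking is skipped for subclasses that set the legacy attr.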
        if hasattr(self, 'attrs'):
            self.check_output(check_eager=False)
        else:
            self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        if hasattr(self, 'attrs'):
            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
        else:
            self.check_grad(['X', 'Y'], 'Out', check_eager=True)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')
        )


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseMinOp_scalar(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
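        # Y is a single-element tensor, exercising the scalar-broadcast path.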
        x = np.random.randint(-5, 6, [10, 3, 4]).astype("float64")
        y = np.array([0.5]).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMinOp_Vector(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.random((100,)).astype("float64")
        sgn = np.random.choice([-1, 1], (100,)).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 0}
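        # axis=0 aligns Y's 100 elements with dim 0 of X (100, 3, 2); Y
        # broadcasts as shape (100, 1, 1), matching the reshape below.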
        self.outputs = {
            'Out': np.minimum(
                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
            )
        }


class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
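        # axis=1 aligns Y's 100 elements with dim 1 of X (2, 100, 3); Y
        # broadcasts as shape (1, 100, 1).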
        self.outputs = {
            'Out': np.minimum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
            )
        }


class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

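        # No axis attr: Y aligns with the trailing dim of X by default and
        # broadcasts as shape (1, 1, 100).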
        self.outputs = {
            'Out': np.minimum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
            )
        }


class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
            np.float64
        )
        self.inputs = {'X': x, 'Y': y}

        self.attrs = {'axis': 1}
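        # axis=1 aligns Y (25, 4) with dims 1-2 of X (2, 25, 4, 1); Y
        # broadcasts as shape (1, 25, 4, 1).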
        self.outputs = {
            'Out': np.minimum(
                self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)
            )
        }


class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_min"
        self.python_api = paddle.minimum
        x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float64)
        sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float64)
        y = x + sgn * np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float64)
        self.inputs = {'X': x, 'Y': y}

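        # X and Y have the same rank, so Y (2, 10, 1, 5) broadcasts against
        # X (2, 10, 2, 5) along dim 2 with no axis attr needed.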
        self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseMinOpFP16(unittest.TestCase):
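    # Compares an FP16 run against an FP32 run cast back to FP16: forward
    # output and both input gradients must match exactly.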
    def get_out_and_grad(self, x_np, y_np, axis, place, use_fp32=False):
        assert x_np.dtype == np.float16
        assert y_np.dtype == np.float16
        if use_fp32:
            x_np = x_np.astype(np.float32)
            y_np = y_np.astype(np.float32)
        dtype = np.float16

        with fluid.dygraph.guard(place):
            x = paddle.to_tensor(x_np)
            y = paddle.to_tensor(y_np)
            x.stop_gradient = False
            y.stop_gradient = False
            z = fluid.layers.elementwise_min(x, y, axis)
            x_g, y_g = paddle.grad([z], [x, y])
            return (
                z.numpy().astype(dtype),
                x_g.numpy().astype(dtype),
                y_g.numpy().astype(dtype),
            )

    def check_main(self, x_shape, y_shape, axis=-1):
        if not paddle.is_compiled_with_cuda():
            return
        place = paddle.CUDAPlace(0)
        if not core.is_float16_supported(place):
            return

        x_np = np.random.random(size=x_shape).astype(np.float16)
        y_np = np.random.random(size=y_shape).astype(np.float16)

        z_1, x_g_1, y_g_1 = self.get_out_and_grad(
            x_np, y_np, axis, place, False
        )
        z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True)
        np.testing.assert_array_equal(z_1, z_2)
        np.testing.assert_array_equal(x_g_1, x_g_2)
        np.testing.assert_array_equal(y_g_1, y_g_2)

    def test_main(self):
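        # Shape pairs mirror the broadcast cases covered by the static-graph
        # OpTest classes above.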
        self.check_main((13, 17), (13, 17))
        self.check_main((10, 3, 4), (1,))
        self.check_main((100,), (100,))
        self.check_main((100, 3, 2), (100,), 0)
        self.check_main((2, 100, 3), (100,), 1)
        self.check_main((2, 3, 100), (100,))
        self.check_main((2, 25, 4, 1), (25, 4), 1)
        self.check_main((2, 10, 2, 5), (2, 10, 1, 5))


if __name__ == '__main__':
    unittest.main()