# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest

paddle.enable_static()
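# Static graph mode is the file-level default here; test_dynamic_api switches
# to dynamic mode explicitly via paddle.disable_static().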


class ApiFMinTest(unittest.TestCase):
    """ApiFMinTest"""

    def setUp(self):
        """setUp"""
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

        self.input_x = np.random.rand(10, 15).astype("float32")
        self.input_y = np.random.rand(10, 15).astype("float32")
        self.input_z = np.random.rand(15).astype("float32")
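        # Note: astype('int64') does not preserve nan/inf. The cast result is
        # platform-dependent (commonly INT64_MIN), so input_a and input_b end
        # up holding extreme int64 values rather than literal nan/inf.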
        self.input_a = np.array([0, np.nan, np.nan]).astype('int64')
        self.input_b = np.array([2, np.inf, -np.inf]).astype('int64')
        self.input_c = np.array([4, 1, 3]).astype('int64')

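        # np.fmin is the reference: an elementwise minimum that returns the
        # non-NaN operand whenever exactly one of the compared elements is NaN.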
        self.np_expected1 = np.fmin(self.input_x, self.input_y)
        self.np_expected2 = np.fmin(self.input_x, self.input_z)
        self.np_expected3 = np.fmin(self.input_a, self.input_c)
        self.np_expected4 = np.fmin(self.input_b, self.input_c)

    def test_static_api(self):
        """test_static_api"""
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_y = paddle.static.data("y", shape=[10, 15], dtype="float32")
            result_fmin = paddle.fmin(data_x, data_y)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={
                "x": self.input_x,
                "y": self.input_y
            },
                           fetch_list=[result_fmin])
        np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_z = paddle.static.data("z", shape=[15], dtype="float32")
            result_fmin = paddle.fmin(data_x, data_z)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={
                "x": self.input_x,
                "z": self.input_z
            },
                           fetch_list=[result_fmin])
        np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_a = paddle.static.data("a", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmin = paddle.fmin(data_a, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={
                "a": self.input_a,
                "c": self.input_c
            },
                           fetch_list=[result_fmin])
        np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_b = paddle.static.data("b", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmin = paddle.fmin(data_b, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={
                "b": self.input_b,
                "c": self.input_c
            },
                           fetch_list=[result_fmin])
        np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05)

    def test_dynamic_api(self):
        """test_dynamic_api"""
        paddle.disable_static()
        x = paddle.to_tensor(self.input_x)
        y = paddle.to_tensor(self.input_y)
        z = paddle.to_tensor(self.input_z)

        a = paddle.to_tensor(self.input_a)
        b = paddle.to_tensor(self.input_b)
        c = paddle.to_tensor(self.input_c)

        res = paddle.fmin(x, y)
        res = res.numpy()
        np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05)

        # test broadcasting: z (shape [15]) broadcasts against x (shape [10, 15])
        res = paddle.fmin(x, z)
        res = res.numpy()
        np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05)

        res = paddle.fmin(a, c)
        res = res.numpy()
        np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05)

        res = paddle.fmin(b, c)
        res = res.numpy()
        np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05)


class TestElementwiseFminOp(OpTest):
    """TestElementwiseFminOp"""

    def setUp(self):
        """setUp"""
        self.op_type = "elementwise_fmin"
        self.python_api = paddle.fmin
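        # python_api points OpTest at the dygraph entry point, so the
        # check_eager=True calls below can also verify the op in eager mode.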
        # fmin is not differentiable at points where x == y, so construct y by
        # adding an offset of magnitude at least 0.1 to x; every element pair
        # stays well separated and the gradient stays well defined.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.fmin(self.inputs['X'], self.inputs['Y'])}
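        # check_output runs the C++ elementwise_fmin kernel and compares its
        # result against this NumPy reference.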

    def test_check_output(self):
        """test_check_output"""
        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        """test_check_grad_normal"""
        self.check_grad(['X', 'Y'], 'Out', check_eager=True)

    def test_check_grad_ignore_x(self):
        """test_check_grad_ignore_x"""
        self.check_grad(['Y'],
                        'Out',
                        max_relative_error=0.005,
                        no_grad_set=set("X"),
                        check_eager=True)

    def test_check_grad_ignore_y(self):
        """test_check_grad_ignore_y"""
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.005,
                        no_grad_set=set('Y'),
                        check_eager=True)


class TestElementwiseFmin2Op(OpTest):
    """TestElementwiseFmin2Op"""

    def setUp(self):
        """setUp"""
        self.op_type = "elementwise_fmin"
        self.python_api = paddle.fmin
        # fmin is not differentiable at points where x == y, so construct y by
        # adding an offset of magnitude at least 0.1 to x; every element pair
        # stays well separated and the gradient stays well defined.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")

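        # Overwrite part of y with NaN so that fmin's NaN handling (prefer the
        # non-NaN operand) is exercised in both forward and gradient checks.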
        y[2, 10:] = np.nan
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.fmin(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        """test_check_output"""
        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        """test_check_grad_normal"""
        self.check_grad(['X', 'Y'], 'Out', check_eager=True)

    def test_check_grad_ignore_x(self):
        """test_check_grad_ignore_x"""
        self.check_grad(['Y'],
                        'Out',
                        max_relative_error=0.005,
                        no_grad_set=set("X"),
                        check_eager=True)

    def test_check_grad_ignore_y(self):
        """test_check_grad_ignore_y"""
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.005,
                        no_grad_set=set('Y'),
                        check_eager=True)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()