# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
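
"""Unit tests for the legacy ``isinf``, ``isnan`` and ``isfinite`` operators.

Each op is exercised with FP32 inputs, plus FP16 and BF16 variants that only
run when CUDA (and bfloat16 support) is available.
"""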

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16

from paddle.fluid import core


class TestInf(OpTest):
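    """isinf on an FP32 input seeded with inf values; expects a single True."""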
    def setUp(self):
        self.op_type = "isinf"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
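        # Assigning a scalar broadcasts np.inf across the entire first and
        # last rows of the [11, 17] array.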
        x[0] = np.inf
        x[-1] = np.inf

        self.inputs = {'X': x}
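        # The op reduces the whole tensor to a single flag, reported in the
        # input dtype, hence the astype on the expected output.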
        self.outputs = {'Out': np.array([True]).astype(self.dtype)}

    def init_dtype(self):
        pass

    def test_output(self):
        self.check_output()


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16Inf(TestInf):
    def init_dtype(self):
        self.dtype = np.float16


# BF16 isinf test
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestInfBF16(OpTest):
    def setUp(self):
        self.op_type = "isinf"
        self.dtype = np.uint16
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        x[0] = np.inf
        x[-1] = np.inf

        out = np.array([True])
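        # convert_float_to_uint16 converts the FP32 data to BF16 and stores
        # the raw 16-bit patterns in a uint16 array, which is how OpTest
        # represents BF16 tensors.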
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}

    def test_output(self):
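        # The BF16 kernel is CUDA-only (see the skipIf above), so check
        # directly on CUDAPlace(0) rather than the default places.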
        self.check_output_with_place(core.CUDAPlace(0))


class TestNAN(OpTest):
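    """isnan mirrors TestInf, injecting NaNs instead of infs."""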
    def setUp(self):
        self.op_type = "isnan"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        x[0] = np.nan
        x[-1] = np.nan

        self.inputs = {'X': x}
        self.outputs = {'Out': np.array([True]).astype(self.dtype)}

    def init_dtype(self):
        pass

    def test_output(self):
        self.check_output()


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16NAN(TestNAN):
    def init_dtype(self):
        self.dtype = np.float16


# BF16 isnan test
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestNANBF16(OpTest):
    def setUp(self):
        self.op_type = "isnan"
        self.dtype = np.uint16
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        x[0] = np.nan
        x[-1] = np.nan

        out = np.array([True])
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}

    def test_output(self):
        self.check_output_with_place(core.CUDAPlace(0))


class TestIsfinite(OpTest):
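    """isfinite on an input holding both an inf and a NaN; expects False."""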
    def setUp(self):
        self.op_type = "isfinite"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        x[0] = np.inf
        x[-1] = np.nan
        # x now contains both an inf and a NaN, so the tensor is not finite.

        self.inputs = {'X': x}
        self.outputs = {'Out': np.array([False]).astype(self.dtype)}

    def init_dtype(self):
        pass

    def test_output(self):
        self.check_output()


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16Isfinite(TestIsfinite):
    def init_dtype(self):
        self.dtype = np.float16


# BF16 isfinite test
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestIsfiniteBF16(OpTest):
    def setUp(self):
        self.op_type = "isfinite"
        self.dtype = np.uint16
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        x[0] = np.inf
        x[-1] = np.nan

        out = np.array([False])
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}

    def test_output(self):
        self.check_output_with_place(core.CUDAPlace(0))


if __name__ == '__main__':
    unittest.main()