#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
from scipy.special import erf

import paddle
import paddle.fluid.dygraph as dg
from paddle import fluid


class TestErfOp(OpTest):
    """Op-level test for the ``erf`` operator.

    Generates a random input in [-1, 1] and uses ``scipy.special.erf``
    as the reference implementation for forward and gradient checks.
    """

    def setUp(self):
        self.op_type = "erf"
        # Also exercise the composite (prim) lowering path for this op.
        self.prim_op_type = "prim"
        self.public_python_api = paddle.erf
        self.python_api = paddle.erf
        self.dtype = self._init_dtype()
        self.x_shape = [11, 17]
        x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
        y_ref = erf(x).astype(self.dtype)
        self.inputs = {'X': x}
        self.outputs = {'Out': y_ref}

    def _init_dtype(self):
        # Subclasses may override this to run the same test in another dtype.
        return "float64"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # check_prim=True additionally validates the decomposed gradient.
        self.check_grad(['X'], 'Out', check_prim=True)


class TestErfLayer(unittest.TestCase):
    """API-level tests for ``paddle.erf`` against ``scipy.special.erf``."""

    def _test_case(self, place):
        # Run paddle.erf in dygraph mode on `place` and compare with scipy.
        x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
        y_ref = erf(x)
        with dg.guard(place):
            x_var = dg.to_variable(x)
            y_var = paddle.erf(x_var)
            y_test = y_var.numpy()
        np.testing.assert_allclose(y_ref, y_test, rtol=1e-05)

    def test_case(self):
        with paddle.fluid.framework._static_guard():
            self._test_case(fluid.CPUPlace())
            if fluid.is_compiled_with_cuda():
                self._test_case(fluid.CUDAPlace(0))

    def test_name(self):
        # The user-provided `name` should appear in the output variable name.
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('x', [3, 4])
                y = paddle.erf(x, name='erf')
                self.assertTrue('erf' in y.name)


class TestErfFP16OP(OpTest):
    """float16 variant of the op-level ``erf`` test."""

    def setUp(self):
        self.op_type = "erf"
        self.prim_op_type = "prim"
        self.python_api = paddle.erf
        self.public_python_api = paddle.erf
        self.dtype = np.float16
        self.x_shape = [11, 17]
        # Random sample in [-1, 1); scipy's erf serves as the reference.
        sample = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
        expected = erf(sample).astype(self.dtype)
        self.inputs = {'X': sample}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not (
        paddle.fluid.core.is_compiled_with_cuda()
        and paddle.fluid.core.is_bfloat16_supported(
            paddle.fluid.core.CUDAPlace(0)
        )
    ),
    "core is not complied with CUDA and not support the bfloat16",
)
class TestErfBF16OP(OpTest):
    """bfloat16 variant of the ``erf`` test; tensors are stored as uint16."""

    def setUp(self):
        self.op_type = "erf"
        self.prim_op_type = "prim"
        self.python_api = paddle.erf
        self.public_python_api = paddle.erf
        self.dtype = np.uint16
        self.x_shape = [11, 17]
        # Compute the reference in float32, then pack both sides to bf16 bits.
        sample = np.random.uniform(-1, 1, size=self.x_shape).astype(np.float32)
        expected = erf(sample).astype(np.float32)
        self.inputs = {'X': convert_float_to_uint16(sample)}
        self.outputs = {'Out': convert_float_to_uint16(expected)}

    def test_check_output(self):
        gpu = paddle.fluid.core.CUDAPlace(0)
        self.check_output_with_place(gpu)

    def test_check_grad(self):
        gpu = paddle.fluid.core.CUDAPlace(0)
        self.check_grad_with_place(gpu, ['X'], 'Out', check_prim=True)


if __name__ == '__main__':
    unittest.main()