#  Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest

import random


class TestElementwiseModOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_mod"
        self.python_api = paddle.remainder
        self.axis = -1
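        # These init_* hooks are overridden by the subclasses below to vary
        # the dtype, input shapes, and broadcast axis.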
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
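        # Eager checking compares against self.python_api (paddle.remainder),
        # which takes no axis argument, so only the default axis=-1 case is
        # checked in eager mode.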
        if self.attrs['axis'] == -1:
            self.check_output(check_eager=True)
        else:
            self.check_output(check_eager=False)

    def init_input_output(self):
        self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
        self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
        self.out = np.mod(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.int32

    def init_axis(self):
        pass


class TestElementwiseModOp_scalar(TestElementwiseModOp):
    def init_input_output(self):
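        # Y is a single-element tensor to exercise scalar broadcasting;
        # the "+ 1" keeps the divisor away from zero.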
        scale_x = random.randint(0, 100000000)
        scale_y = random.randint(1, 100000000)
        self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
        self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
        self.out = np.mod(self.x, self.y)


class TestElementwiseModOpFloat(TestElementwiseModOp):
    def init_dtype(self):
        self.dtype = np.float32

    def init_input_output(self):
        self.x = np.random.uniform(-1000, 1000, [10, 10]).astype(self.dtype)
        self.y = np.random.uniform(-100, 100, [10, 10]).astype(self.dtype)
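        # np.fmod is C-style (result takes the sign of the dividend); wrapping it
        # as fmod(y + fmod(x, y), y) yields Python-style floored modulo, where the
        # result takes the sign of the divisor, matching paddle.remainder.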
        self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)

    def test_check_output(self):
        if self.attrs['axis'] == -1:
            self.check_output(check_eager=True)
        else:
            self.check_output(check_eager=False)


class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
    def init_dtype(self):
        self.dtype = np.float64


class TestRemainderOp(unittest.TestCase):
    def test_name(self):
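        # The user-supplied name should propagate into the output variable's name.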
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
            y = fluid.data(name='y', shape=[2, 3], dtype='int64')

            y_1 = paddle.remainder(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 8, 7]).astype('int64')
            np_y = np.array([1, 5, 3, 3]).astype('int64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.remainder(x, y)
            np_z = z.numpy()
            z_expected = np.array([0, 3, 2, 1])
            self.assertEqual((np_z == z_expected).all(), True)

            np_x = np.array([-3.3, 11.5, -2, 3.5])
            np_y = np.array([-1.2, 2., 3.3, -2.3])
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
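            # The % operator maps to paddle.remainder and follows Python's
            # floored-modulo semantics: the result takes the sign of the divisor.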
            z = x % y
            z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
            self.assertEqual(np.allclose(z_expected, z.numpy()), True)

            np_x = np.array([-3, 11, -2, 3])
            np_y = np.array([-1, 2, 3, -2])
            x = paddle.to_tensor(np_x, dtype="int64")
            y = paddle.to_tensor(np_y, dtype="int64")
            z = x % y
            z_expected = np.array([0, 1, 1, -1])
            self.assertEqual(np.allclose(z_expected, z.numpy()), True)


if __name__ == '__main__':
    unittest.main()