# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase

import paddle
import paddle.fluid.core as core


class ApiMaxTest(unittest.TestCase):
    def setUp(self):
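        # Run on the GPU place when Paddle is compiled with CUDA, otherwise fall back to CPU.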
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

    def test_api(self):
        paddle.enable_static()
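        # Static-graph checks: compare paddle.max against np.max for several dtypes and axis choices.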
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data = paddle.static.data("data", shape=[10, 10], dtype="float32")
            result_max = paddle.max(x=data, axis=1)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max])
        self.assertEqual((res == np.max(input_data, axis=1)).all(), True)

        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_max = paddle.max(x=data, axis=0)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max])
        self.assertEqual((res == np.max(input_data, axis=0)).all(), True)

        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_max = paddle.max(x=data, axis=(0, 1))
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max])
        self.assertEqual((res == np.max(input_data, axis=(0, 1))).all(), True)

    def test_errors(self):
        paddle.enable_static()

        def test_input_type():
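            # paddle.max expects a Tensor/Variable; feeding a raw NumPy array should raise TypeError.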
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                data = np.random.rand(10, 10)
                result_max = paddle.max(x=data, axis=0)

        self.assertRaises(TypeError, test_input_type)

    def test_imperative_api(self):
        paddle.disable_static()
        np_x = np.array([10, 10]).astype('float64')
        x = paddle.to_tensor(np_x)
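        # Dynamic-graph result should match np.max along axis 0.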
        z = paddle.max(x, axis=0)
        np_z = z.numpy()
        z_expected = np.array(np.max(np_x, axis=0))
        self.assertEqual((np_z == z_expected).all(), True)

    def test_big_dimension(self):
        paddle.disable_static()
        x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
        np_x = x.numpy()
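        # For a 7-D tensor, axis=-1 and axis=6 both refer to the last dimension.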
        z1 = paddle.max(x, axis=-1)
        z2 = paddle.max(x, axis=6)
        np_z1 = z1.numpy()
        np_z2 = z2.numpy()
        z_expected = np.array(np.max(np_x, axis=6))
        self.assertEqual((np_z1 == z_expected).all(), True)
        self.assertEqual((np_z2 == z_expected).all(), True)

    def test_all_negative_axis(self):
        paddle.disable_static()
        x = paddle.rand(shape=[2, 2])
        np_x = x.numpy()
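        # Negative axes (-2, -1) resolve to (0, 1) for a 2-D tensor.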
        z1 = paddle.max(x, axis=(-2, -1))
        np_z1 = z1.numpy()
        z_expected = np.array(np.max(np_x, axis=(0, 1)))
        self.assertEqual((np_z1 == z_expected).all(), True)


class TestOutDtype(unittest.TestCase):
    def test_max(self):
        api_fn = paddle.max
        shape = [10, 16]
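        # The output dtype is expected to follow the input dtype for each listed type.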
        check_out_dtype(
            api_fn,
            in_specs=[(shape,)],
            expect_dtypes=['float32', 'float64', 'int32', 'int64'],
        )


class TestMaxWithTensorAxis1(TestReduceOPTensorAxisBase):
    def init_data(self):
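        # The reduce axis is passed as an int64 Tensor instead of plain Python ints.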
        self.pd_api = paddle.max
        self.np_api = np.max
        self.x = paddle.randn([10, 5, 9, 9], dtype='float64')
        self.np_axis = np.array([1, 2], dtype='int64')
        self.tensor_axis = paddle.to_tensor([1, 2], dtype='int64')


class TestMaxWithTensorAxis2(TestReduceOPTensorAxisBase):
    def init_data(self):
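        # The axis list mixes a plain Python int with int64 Tensors.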
        self.pd_api = paddle.max
        self.np_api = np.max
        self.x = paddle.randn([10, 10, 9, 9], dtype='float64')
        self.np_axis = np.array([0, 1, 2], dtype='int64')
        self.tensor_axis = [
            0,
            paddle.to_tensor([1], 'int64'),
            paddle.to_tensor([2], 'int64'),
        ]


if __name__ == '__main__':
    unittest.main()