#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.static import Program, program_guard
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


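# Basic scale op test: Out = X * scale for a float64 input and a constant 'scale' attribute.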
class TestScaleOp(OpTest):
    def setUp(self):
        self.op_type = "scale"
        self.python_api = paddle.scale
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
        self.attrs = {'scale': -2.3}
        self.outputs = {
            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
        }

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


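# Variant of the basic test where the scale factor is passed through the 'ScaleTensor' input
# instead of the 'scale' attribute.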
class TestScaleOpScaleVariable(OpTest):
    def setUp(self):
        self.op_type = "scale"
        self.python_api = paddle.scale
        self.dtype = np.float64
        self.init_dtype_type()
        self.scale = -2.3
        self.inputs = {
            'X': np.random.random((10, 10)).astype(self.dtype),
            'ScaleTensor': np.array([self.scale]).astype('float64'),
        }
        self.attrs = {}
        self.outputs = {'Out': self.inputs['X'] * self.dtype(self.scale)}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)


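# Runs the scale operator directly on a SelectedRows input and checks that the scaled
# values, height, and rows all match the input.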
class TestScaleOpSelectedRows(unittest.TestCase):
    def init_dtype_type(self):
        pass

    def check_with_place(self, place, in_name, out_name):
        scope = core.Scope()

        self.dtype = np.float64
        self.init_dtype_type()

        # create and initialize the input SelectedRows variable
        in_height = 10
        in_rows = [0, 4, 7]
        in_row_numel = 12
        scale = 2.0

        in_selected_rows = scope.var(in_name).get_selected_rows()
        in_selected_rows.set_height(in_height)
        in_selected_rows.set_rows(in_rows)
        in_array = np.random.random((len(in_rows), in_row_numel)).astype(
            self.dtype
        )

        in_tensor = in_selected_rows.get_tensor()
        in_tensor.set(in_array, place)

        # create the output SelectedRows variable with matching dims
        out_selected_rows = scope.var(out_name).get_selected_rows()
        out_tensor = out_selected_rows.get_tensor()
        out_tensor._set_dims(in_tensor._get_dims())

        # create and run the scale operator
        scale_op = Operator("scale", X=in_name, Out=out_name, scale=scale)
        scale_op.run(scope, place)

        # get and compare result
        out_height = out_selected_rows.height()
        out_rows = out_selected_rows.rows()
        result_array = np.array(out_tensor)

        assert (in_array * scale == result_array).all()
        assert in_height == out_height
        assert in_rows == out_rows

    def test_scale_selected_rows(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place, 'in', 'out')

    def test_scale_selected_rows_inplace(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place, 'in', 'in')


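# paddle.scale should raise TypeError when passed a non-Tensor input such as a plain Python list.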
class TestScaleRaiseError(unittest.TestCase):
    def test_errors(self):
        def test_type():
            paddle.scale([10])

        self.assertRaises(TypeError, test_type)


# Add FP16 test
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestScaleFp16Op(TestScaleOp):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=0.002, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ["X"], "Out", max_relative_error=0.05, check_eager=True
            )


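# bfloat16 test: the float32 input and expected output are converted to uint16 storage
# with convert_float_to_uint16.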
class TestScaleBF16Op(OpTest):
    def setUp(self):
        self.op_type = "scale"
        self.python_api = paddle.scale
        self.dtype = np.uint16
        self.attrs = {'scale': -2.3}
        x = np.random.random((10, 10)).astype(np.float32)
        out = x * np.float32(self.attrs['scale'])
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', numeric_grad_delta=0.8, check_eager=True)


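# FP16 SelectedRows variant: reuses the checks above on a CUDA place when float16 is supported.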
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_scale_selected_rows(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_with_place(place, 'in', 'out')

    def test_scale_selected_rows_inplace(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_with_place(place, 'in', 'in')


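# API tests: paddle.scale and the in-place Tensor.scale_ should compute
# Out = X * scale + bias in both static graph and dygraph modes.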
class TestScaleApiStatic(unittest.TestCase):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return paddle.scale(x, scale, bias)

    def test_api(self):
        paddle.enable_static()
        input = np.random.random([2, 25]).astype("float32")
        main_prog = Program()
        with program_guard(main_prog, Program()):
            x = paddle.static.data(name="x", shape=[2, 25], dtype="float32")
            out = self._executed_api(x, scale=2.0, bias=3.0)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        out = exe.run(main_prog, feed={"x": input}, fetch_list=[out])
        np.testing.assert_array_equal(out[0], input * 2.0 + 3.0)


class TestScaleInplaceApiStatic(TestScaleApiStatic):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return x.scale_(scale, bias)


class TestScaleApiDygraph(unittest.TestCase):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return paddle.scale(x, scale, bias)

    def test_api(self):
        paddle.disable_static()
        input = np.random.random([2, 25]).astype("float32")
        x = paddle.to_tensor(input)
        out = self._executed_api(x, scale=2.0, bias=3.0)
        np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0)
        paddle.enable_static()


class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return x.scale_(scale, bias)


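# Double-grad (second-order gradient) check for paddle.scale in static graph and dygraph modes.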
class TestScaleDoubleGradCheck(unittest.TestCase):
    def scale_wrapper(self, x):
        return paddle.scale(x[0], scale=2.0)

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and should not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.scale(data, 2.0)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.scale_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


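# Triple-grad (third-order gradient) check for paddle.scale, mirroring the double-grad test above.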
class TestScaleTripleGradCheck(unittest.TestCase):
    def scale_wrapper(self, x):
        return paddle.scale(x[0], scale=2.0)

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and should not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [2, 3], False, dtype)
        data.persistable = True
        out = paddle.scale(data, 2.0)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.scale_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


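# Zero-size input: scaling a tensor with zero elements should return an identical empty tensor
# on CPU and, if available, GPU.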
class TestScaleOpZeroNumelVariable(unittest.TestCase):
    def test_check_zero_numel_cpu(self):
        paddle.set_device('cpu')
        data = paddle.ones([0, 1])
        out = paddle.scale(data, 2)
        self.assertEqual(out, data)

        if paddle.is_compiled_with_cuda():
            paddle.set_device('gpu')
            data = paddle.ones([0, 1])
            out = paddle.scale(data, 2)
            self.assertEqual(out, data)


if __name__ == "__main__":
    unittest.main()