#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
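
# These tests numerically verify the second- and third-order gradients of the
# elementwise ops (add, subtract, multiply, divide) via the gradient_checker
# helpers, both for equal-shape inputs and for broadcast inputs along an axis.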


class TestElementwiseMulDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.multiply(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

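        # double_grad_check numerically verifies the second-order gradients
        # of out w.r.t. x and y, using eps as the finite-difference step.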
        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[:-1], dtype)
        x.persistable = True
        y.persistable = True
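        # `axis=0` uses the legacy elementwise broadcast rule: y's dims are
        # aligned with x's leading dims, so y ([2, 3, 4]) is broadcast over
        # the trailing dim of x ([2, 3, 4, 5]).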
        out = paddle.tensor.math._multiply_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseAddDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.add(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[:-1], dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._add_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseSubDoubleGradCheck(unittest.TestCase):
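    # The *_for_dygraph checkers invoke a Python callable on the list of
    # inputs, so the op is wrapped here instead of being passed directly.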
    def subtract_wrapper(self, x):
        return paddle.subtract(x[0], x[1])

    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.subtract(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

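        # Check the static-graph double grad first, then repeat the same
        # check in dygraph mode through the wrapper.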
        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.subtract_wrapper,
            [x, y],
            out,
            x_init=[x_arr, y_arr],
            place=place,
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[:-1], dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._subtract_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseDivDoubleGradCheck(unittest.TestCase):
    def divide_wrapper(self, x):
        return paddle.divide(x[0], x[1])

    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.0001
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._divide_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
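        # Keep the divisor away from zero: near-zero y values would make the
        # finite-difference estimates of the gradient ill-conditioned.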
        y_arr[np.abs(y_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.divide_wrapper,
            [x, y],
            out,
            x_init=[x_arr, y_arr],
            place=place,
            atol=1e-3,
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.0001
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[1:-1], dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._divide_with_axis(x, y, axis=1)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[1:-1]).astype(dtype)
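        # As above, keep the divisor away from zero.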
        y_arr[np.abs(y_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseAddTripleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.add(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

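        # triple_grad_check applies the same finite-difference comparison to
        # the third-order gradients.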
        gradient_checker.triple_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[:-1], dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._add_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.triple_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseMulTripleGradCheck(unittest.TestCase):
    def multiply_wrapper(self, x):
        return paddle.multiply(x[0], x[1])

    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape, dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.multiply(x, y)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.multiply_wrapper,
            [x, y],
            out,
            x_init=[x_arr, y_arr],
            place=place,
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestElementwiseMulBroadcastTripleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # the shape of the input variables must be specified explicitly and must not include -1.
        shape = [2, 3, 4, 5]
        eps = 0.005
        dtype = np.float64

        x = paddle.static.data('x', shape, dtype)
        y = paddle.static.data('y', shape[:-1], dtype)
        x.persistable = True
        y.persistable = True
        out = paddle.tensor.math._multiply_with_axis(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.triple_grad_check(
            [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()