#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle

import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
import gradient_checker

from decorator_helper import prog_scope
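
# Each test below builds a small convolution network in a static-graph program,
# feeds random values for the input and the convolution filters, and uses
# gradient_checker.double_grad_check to numerically verify the second-order
# (double) gradients on each place collected in its test_grad method.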


class TestConvDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
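        # ROCm builds run the check in float32; other builds use float64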
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, groups=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
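        # give the conv filter parameters random initial values for the check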
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
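        # numerically verify the double gradients of y w.r.t. x and the filters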
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]

        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConvDoubleGradCheckTest0(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConvDoubleGradCheckTest1(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, padding=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 4, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(x, 2, 1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        # places = [fluid.CPUPlace()]
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheckTest1(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 5, 3, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(x, 2, 1, padding=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 0, 0, 1],
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding="SAME",
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding="VALID",
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 1],
            bias_attr=False,
            use_cudnn=True,
            groups=1,
            data_format="NHWC",
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 0, 1, 0],
            bias_attr=False,
            use_cudnn=True,
            groups=1,
            data_format="NHWC",
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 0, 0, 1, 1, 2],
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding="SAME",
            groups=1,
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding="VALID",
            bias_attr=False,
            use_cudnn=True,
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 1, 1],
            bias_attr=False,
            use_cudnn=True,
            groups=1,
            data_format="NDHWC",
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = paddle.static.nn.conv3d(
            input=x,
            num_filters=2,
            filter_size=1,
            padding=[1, 0, 1, 0, 1, 0],
            bias_attr=False,
            use_cudnn=True,
            groups=1,
            data_format="NDHWC",
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestDepthWiseConvDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)

        # conditions for dispatching to the depthwise conv kernel:
        # use_cudnn == False
        # groups == num_channels
        # num_filters % num_channels == 0
        y = layers.conv2d(
            x, shape[1], 1, groups=shape[1], bias_attr=False, use_cudnn=False
        )
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps
        )

    def test_grad(self):
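        # the depthwise double-grad case is only exercised on CUDA places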
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase):
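    # functional wrapper passed to double_grad_check_for_dygraph so the same
    # depthwise conv can be rebuilt and checked in dygraph mode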
    def depthwise_conv2d_wrapper(self, x):
        return paddle.nn.functional.conv2d(x[0], x[1], groups=4)

    @prog_scope()
    def func(self, place):
        x_shape = [2, 4, 3, 3]
        w_shape = [4, 1, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', x_shape, False, dtype)
        w = layers.data('w', w_shape, False, dtype)

        # conditions for dispatching to the depthwise conv kernel:
        # use_cudnn == False
        # groups == num_channels
        # num_filters % num_channels == 0

        y = paddle.nn.functional.conv2d(x, w, groups=4)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, w], y, x_init=[x_arr, w_arr], place=place, eps=eps
        )
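        # repeat the double-grad check in dygraph mode via the wrapper above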
        gradient_checker.double_grad_check_for_dygraph(
            self.depthwise_conv2d_wrapper,
            [x, w],
            y,
            x_init=[x_arr, w_arr],
            place=place,
        )

    def test_grad(self):
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_NN(unittest.TestCase):
    def conv3d_wrapper(self, x):
        return paddle.nn.functional.conv3d(x[0], x[1])

    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 8, 8, 8]
        w_shape = [6, 3, 3, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', x_shape, False, dtype)
        w = layers.data('w', w_shape, False, dtype)
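        # keep x and w persistable for the double-grad checks below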
        x.persistable = True
        w.persistable = True
        y = paddle.nn.functional.conv3d(x, w)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x, w], y, x_init=[x_arr, w_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.conv3d_wrapper, [x, w], y, x_init=[x_arr, w_arr], place=place
        )

    def test_grad(self):
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()