#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
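"""Second-order (double) gradient checks for conv2d / conv3d variants.

Each test builds a small convolution in a static-graph program, randomly
initializes the input and the conv parameters, and calls
gradient_checker.double_grad_check, which compares the analytically
constructed second-order gradients against finite-difference estimates
taken with step size `eps`. ROCm builds run in float32 (float64
convolutions are presumably unsupported there); other builds use float64
for tighter numeric tolerances.
"""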

import unittest
import numpy as np
import paddle

import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
import gradient_checker

from decorator_helper import prog_scope


class TestConvDoubleGradCheck(unittest.TestCase):
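    """Double-grad check for conv2d with 1x1 filters, groups=1, no bias."""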

    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, groups=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]

        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConvDoubleGradCheckTest0(unittest.TestCase):
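    """Double-grad check for conv2d with the default groups argument."""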

    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConvDoubleGradCheckTest1(unittest.TestCase):
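    """Double-grad check for conv2d with symmetric padding=1."""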

    @prog_scope()
    def func(self, place):
        shape = [2, 3, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, padding=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck(unittest.TestCase):
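    """Double-grad check for conv3d on a 5-D NCDHW input (GPU only)."""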

    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 4, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(x, 2, 1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        #places = [fluid.CPUPlace()]
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheckTest1(unittest.TestCase):
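    """Double-grad check for conv3d with symmetric padding=1."""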

    @prog_scope()
    def func(self, place):
        shape = [2, 4, 5, 3, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(x, 2, 1, padding=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase):
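    """Double-grad check for conv2d with asymmetric padding [1, 0, 0, 1]."""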

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 0, 0, 1],
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase):
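    """Double-grad check for conv2d with padding="SAME"."""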

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding="SAME",
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase):
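    """Double-grad check for conv2d with padding="VALID"."""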

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding="VALID",
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase):
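    """Double-grad check for conv2d with NHWC (channel-last) input."""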

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 1],
                          bias_attr=False,
                          use_cudnn=True,
                          groups=1,
                          data_format="NHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
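    """Double-grad check for conv2d with NHWC input and asymmetric padding."""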

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 0, 1, 0],
                          bias_attr=False,
                          use_cudnn=True,
                          groups=1,
                          data_format="NHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase):
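    """Double-grad check for conv3d with asymmetric padding [1, 0, 0, 1, 1, 2]."""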

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 0, 0, 1, 1, 2],
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase):
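    """Double-grad check for conv3d with padding="SAME"."""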

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding="SAME",
                          groups=1,
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase):
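    """Double-grad check for conv3d with padding="VALID"."""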

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 3, 3, 2]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding="VALID",
                          bias_attr=False,
                          use_cudnn=True)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase):
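    """Double-grad check for conv3d with NDHWC (channel-last) input."""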

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 1, 1],
                          bias_attr=False,
                          use_cudnn=True,
                          groups=1,
                          data_format="NDHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase):
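    """Double-grad check for conv3d with NDHWC input and asymmetric padding."""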

    @prog_scope()
    def func(self, place):
        shape = [2, 2, 2, 2, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv3d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 0, 1, 0, 1, 0],
                          bias_attr=False,
                          use_cudnn=True,
                          groups=1,
                          data_format="NDHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestDepthWiseConvDoubleGradCheck(unittest.TestCase):
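    """Double-grad check for depthwise conv2d (groups == num_channels, GPU only)."""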

    @prog_scope()
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)

        # Conditions under which the depthwise_conv2d kernel is selected:
        #   use_cudnn == False
        #   groups == num_channels
        #   num_filters % num_channels == 0
        y = layers.conv2d(x,
                          shape[1],
                          1,
                          groups=shape[1],
                          bias_attr=False,
                          use_cudnn=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)

    def test_grad(self):
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase):
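    """Double-grad check for depthwise conv2d via paddle.nn.functional.conv2d,
    verified in both static graph and dygraph modes."""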

    def depthwise_conv2d_wrapper(self, x):
        return paddle.nn.functional.conv2d(x[0], x[1], groups=4)

    @prog_scope()
    def func(self, place):
        x_shape = [2, 4, 3, 3]
        w_shape = [4, 1, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', x_shape, False, dtype)
        w = layers.data('w', w_shape, False, dtype)

        # Conditions under which the depthwise_conv2d kernel is selected:
        #   use_cudnn == False
        #   groups == num_channels
        #   num_filters % num_channels == 0

        y = paddle.nn.functional.conv2d(x, w, groups=4)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)

        gradient_checker.double_grad_check([x, w],
                                           y,
                                           x_init=[x_arr, w_arr],
                                           place=place,
                                           eps=eps)
        gradient_checker.double_grad_check_for_dygraph(
            self.depthwise_conv2d_wrapper, [x, w],
            y,
            x_init=[x_arr, w_arr],
            place=place)

    def test_grad(self):
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConv3DDoubleGradCheck_NN(unittest.TestCase):
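    """Double-grad check for paddle.nn.functional.conv3d with an explicit
    weight variable, verified in both static graph and dygraph modes."""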

    def conv3d_wrapper(self, x):
        return paddle.nn.functional.conv3d(x[0], x[1])

    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 8, 8, 8]
        w_shape = [6, 3, 3, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', x_shape, False, dtype)
        w = layers.data('w', w_shape, False, dtype)
        x.persistable = True
        w.persistable = True
        y = paddle.nn.functional.conv3d(x, w)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)

        gradient_checker.double_grad_check([x, w],
                                           y,
                                           x_init=[x_arr, w_arr],
                                           place=place,
                                           eps=eps)
        gradient_checker.double_grad_check_for_dygraph(self.conv3d_wrapper,
                                                       [x, w],
                                                       y,
                                                       x_init=[x_arr, w_arr],
                                                       place=place)

    def test_grad(self):
        places = []
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()