# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn

from paddle.utils.download import get_weights_path_from_url

__all__ = []

model_urls = {
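    # each entry maps an architecture name to a (weights URL, MD5 checksum) pair
    # consumed by get_weights_path_from_url when pretrained weights are requested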
    'resnet18': ('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams',
                 'cf548f46534aa3560945be4b95cd11c4'),
    'resnet34': ('https://paddle-hapi.bj.bcebos.com/models/resnet34.pdparams',
                 '8d2275cf8706028345f78ac0e1d31969'),
    'resnet50': ('https://paddle-hapi.bj.bcebos.com/models/resnet50.pdparams',
                 'ca6f485ee1ab0492d38f323885b0ad80'),
    'resnet101': ('https://paddle-hapi.bj.bcebos.com/models/resnet101.pdparams',
                  '02f35f034ca3858e1e54d4036443c92d'),
    'resnet152': ('https://paddle-hapi.bj.bcebos.com/models/resnet152.pdparams',
                  '7ad16a2f1e7333859ff986138630fd7a'),
    'resnext50_32x4d':
    ('https://paddle-hapi.bj.bcebos.com/models/resnext50_32x4d.pdparams',
     'dc47483169be7d6f018fcbb7baf8775d'),
    "resnext50_64x4d":
    ('https://paddle-hapi.bj.bcebos.com/models/resnext50_64x4d.pdparams',
     '063d4b483e12b06388529450ad7576db'),
    'resnext101_32x4d':
    ('https://paddle-hapi.bj.bcebos.com/models/resnext101_32x4d.pdparams',
     '967b090039f9de2c8d06fe994fb9095f'),
    'resnext101_64x4d':
    ('https://paddle-hapi.bj.bcebos.com/models/resnext101_64x4d.pdparams',
     '98e04e7ca616a066699230d769d03008'),
    'resnext152_32x4d':
    ('https://paddle-hapi.bj.bcebos.com/models/resnext152_32x4d.pdparams',
     '18ff0beee21f2efc99c4b31786107121'),
    'resnext152_64x4d':
    ('https://paddle-hapi.bj.bcebos.com/models/resnext152_64x4d.pdparams',
     '77c4af00ca42c405fa7f841841959379'),
    'wide_resnet50_2':
    ('https://paddle-hapi.bj.bcebos.com/models/wide_resnet50_2.pdparams',
     '0282f804d73debdab289bd9fea3fa6dc'),
    'wide_resnet101_2':
    ('https://paddle-hapi.bj.bcebos.com/models/wide_resnet101_2.pdparams',
     'd4360a2d23657f059216f5d5a1a9ac93'),
}


class BasicBlock(nn.Layer):
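    # Basic residual block with two 3x3 convolutions and batch norm,
    # used by ResNet-18/34 (channel expansion factor of 1).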
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2D

        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock")

        self.conv1 = nn.Conv2D(inplanes,
                               planes,
                               3,
                               padding=1,
                               stride=stride,
                               bias_attr=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2D(planes, planes, 3, padding=1, bias_attr=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class BottleneckBlock(nn.Layer):
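    # Bottleneck residual block with 1x1 -> 3x3 -> 1x1 convolutions and batch norm,
    # used by ResNet-50/101/152 and the ResNeXt/Wide ResNet variants (expansion factor of 4).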

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None):
        super(BottleneckBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2D
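        # base_width and groups scale the internal width, which is how the
        # Wide ResNet and ResNeXt variants are expressed with this block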
        width = int(planes * (base_width / 64.)) * groups

        self.conv1 = nn.Conv2D(inplanes, width, 1, bias_attr=False)
        self.bn1 = norm_layer(width)

        self.conv2 = nn.Conv2D(width,
                               width,
                               3,
                               padding=dilation,
                               stride=stride,
                               groups=groups,
                               dilation=dilation,
                               bias_attr=False)
        self.bn2 = norm_layer(width)

        self.conv3 = nn.Conv2D(width,
                               planes * self.expansion,
                               1,
                               bias_attr=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Layer):
    """ResNet model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        block (BasicBlock|BottleneckBlock): residual block class used to build the model.
        depth (int, optional): number of layers of the ResNet. Default: 50.
        width (int, optional): base width per convolution group of each block. Default: 64.
        num_classes (int, optional): output dimension of the last fc layer. If num_classes <= 0, the last fc layer
                            will not be defined. Default: 1000.
        with_pool (bool, optional): whether to apply adaptive average pooling before the last fc layer. Default: True.
        groups (int, optional): number of groups for each convolution block. Default: 1.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import ResNet
            from paddle.vision.models.resnet import BottleneckBlock, BasicBlock

            # build ResNet with 18 layers
            resnet18 = ResNet(BasicBlock, 18)

            # build ResNet with 50 layers
            resnet50 = ResNet(BottleneckBlock, 50)

            # build Wide ResNet model
            wide_resnet50_2 = ResNet(BottleneckBlock, 50, width=64*2)

            # build ResNeXt model
            resnext50_32x4d = ResNet(BottleneckBlock, 50, width=4, groups=32)

            x = paddle.rand([1, 3, 224, 224])
            out = resnet18(x)

            print(out.shape)
            # [1, 1000]

    """

    def __init__(self,
                 block,
                 depth=50,
                 width=64,
                 num_classes=1000,
                 with_pool=True,
                 groups=1):
        super(ResNet, self).__init__()
        layer_cfg = {
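            # number of residual blocks in each of the four stages, keyed by depth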
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3]
        }
        layers = layer_cfg[depth]
        self.groups = groups
        self.base_width = width
        self.num_classes = num_classes
        self.with_pool = with_pool
        self._norm_layer = nn.BatchNorm2D

        self.inplanes = 64
        self.dilation = 1

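        # stem: 7x7 stride-2 convolution, batch norm, ReLU and 3x3 max pooling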
        self.conv1 = nn.Conv2D(3,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias_attr=False)
        self.bn1 = self._norm_layer(self.inplanes)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        if with_pool:
            self.avgpool = nn.AdaptiveAvgPool2D((1, 1))

        if num_classes > 0:
            self.fc = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
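        # Build one stage of `blocks` residual blocks; a 1x1 conv + norm branch
        # downsamples the identity path when the stride or channel count changes.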
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2D(self.inplanes,
                          planes * block.expansion,
                          1,
                          stride=stride,
                          bias_attr=False),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def forward(self, x):
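        # stem -> four residual stages -> optional global average pool -> optional fc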
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.with_pool:
            x = self.avgpool(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.fc(x)

        return x


def _resnet(arch, Block, depth, pretrained, **kwargs):
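    # Build the requested ResNet variant; if pretrained, download the weights
    # listed in model_urls and load them into the model.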
    model = ResNet(Block, depth, **kwargs)
    if pretrained:
        assert arch in model_urls, "{} model does not have a pretrained model now, you should set pretrained=False".format(
            arch)
        weight_path = get_weights_path_from_url(model_urls[arch][0],
                                                model_urls[arch][1])

        param = paddle.load(weight_path)
        model.set_dict(param)

    return model


def resnet18(pretrained=False, **kwargs):
    """ResNet 18-layer model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnet18

            # build model
            model = resnet18()

            # build model and load ImageNet pretrained weights
            # model = resnet18(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _resnet('resnet18', BasicBlock, 18, pretrained, **kwargs)


def resnet34(pretrained=False, **kwargs):
    """ResNet 34-layer model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnet34

            # build model
            model = resnet34()

            # build model and load ImageNet pretrained weights
            # model = resnet34(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _resnet('resnet34', BasicBlock, 34, pretrained, **kwargs)


def resnet50(pretrained=False, **kwargs):
    """ResNet 50-layer model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnet50

            # build model
            model = resnet50()

            # build model and load ImageNet pretrained weights
            # model = resnet50(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _resnet('resnet50', BottleneckBlock, 50, pretrained, **kwargs)


def resnet101(pretrained=False, **kwargs):
    """ResNet 101-layer model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnet101

            # build model
            model = resnet101()

            # build model and load ImageNet pretrained weights
            # model = resnet101(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _resnet('resnet101', BottleneckBlock, 101, pretrained, **kwargs)


def resnet152(pretrained=False, **kwargs):
    """ResNet 152-layer model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnet152

            # build model
            model = resnet152()

            # build model and load ImageNet pretrained weights
            # model = resnet152(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _resnet('resnet152', BottleneckBlock, 152, pretrained, **kwargs)


def resnext50_32x4d(pretrained=False, **kwargs):
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext50_32x4d

            # build model
            model = resnext50_32x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext50_32x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 32
    kwargs['width'] = 4
    return _resnet('resnext50_32x4d', BottleneckBlock, 50, pretrained, **kwargs)


def resnext50_64x4d(pretrained=False, **kwargs):
    """ResNeXt-50 64x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext50_64x4d

            # build model
            model = resnext50_64x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext50_64x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 64
    kwargs['width'] = 4
    return _resnet('resnext50_64x4d', BottleneckBlock, 50, pretrained, **kwargs)


def resnext101_32x4d(pretrained=False, **kwargs):
    """ResNeXt-101 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext101_32x4d

            # build model
            model = resnext101_32x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext101_32x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 32
    kwargs['width'] = 4
    return _resnet('resnext101_32x4d', BottleneckBlock, 101, pretrained,
                   **kwargs)


def resnext101_64x4d(pretrained=False, **kwargs):
    """ResNeXt-101 64x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext101_64x4d

            # build model
            model = resnext101_64x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext101_64x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 64
    kwargs['width'] = 4
    return _resnet('resnext101_64x4d', BottleneckBlock, 101, pretrained,
                   **kwargs)


def resnext152_32x4d(pretrained=False, **kwargs):
    """ResNeXt-152 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext152_32x4d

            # build model
            model = resnext152_32x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext152_32x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 32
    kwargs['width'] = 4
    return _resnet('resnext152_32x4d', BottleneckBlock, 152, pretrained,
                   **kwargs)


def resnext152_64x4d(pretrained=False, **kwargs):
    """ResNeXt-152 64x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    
    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import resnext152_64x4d

            # build model
            model = resnext152_64x4d()

            # build model and load ImageNet pretrained weights
            # model = resnext152_64x4d(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['groups'] = 64
    kwargs['width'] = 4
    return _resnet('resnext152_64x4d', BottleneckBlock, 152, pretrained,
                   **kwargs)


def wide_resnet50_2(pretrained=False, **kwargs):
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import wide_resnet50_2

            # build model
            model = wide_resnet50_2()

            # build model and load ImageNet pretrained weights
            # model = wide_resnet50_2(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['width'] = 64 * 2
    return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)


def wide_resnet101_2(pretrained=False, **kwargs):
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Args:
        pretrained (bool, optional): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import wide_resnet101_2

            # build model
            model = wide_resnet101_2()

            # build model and load ImageNet pretrained weights
            # model = wide_resnet101_2(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    kwargs['width'] = 64 * 2
    return _resnet('wide_resnet101_2', BottleneckBlock, 101, pretrained,
                   **kwargs)