diff --git a/ppcls/modeling/architectures/regnet.py b/ppcls/modeling/architectures/regnet.py
index 8c2ec57e8eedf279260cae6c0eff6ec9bec6fa91..19ddaaad1b1ed3ebcc6557bcb19c753f8f33c50f 100644
--- a/ppcls/modeling/architectures/regnet.py
+++ b/ppcls/modeling/architectures/regnet.py
@@ -144,7 +144,7 @@ class BottleneckBlock(nn.Layer):
             self.se_block = SELayer(
                 num_channels=w_b,
                 num_filters=w_b,
-                reduction_channels=w_se,
+                reduction_ratio=w_se,
                 name=name + "_branch2se")
         self.conv2 = ConvBNLayer(
             num_channels=w_b,
diff --git a/ppcls/modeling/architectures/vgg.py b/ppcls/modeling/architectures/vgg.py
index ab1b776395982c50b189df8644f53a028cebfbee..1c36e63d2dc540ebf7adb2c7b8a7aab3a3e159da 100644
--- a/ppcls/modeling/architectures/vgg.py
+++ b/ppcls/modeling/architectures/vgg.py
@@ -80,7 +80,7 @@ class VGGNet(nn.Layer):
         }
         assert self.layers in self.vgg_configure.keys(), \
             "supported layers are {} but input layer is {}".format(
-                vgg_configure.keys(), layers)
+                self.vgg_configure.keys(), layers)
 
         self.groups = self.vgg_configure[self.layers]
         self._conv_block_1 = ConvBlock(3, 64, self.groups[0], name="conv1_")
diff --git a/ppcls/modeling/architectures/xception.py b/ppcls/modeling/architectures/xception.py
index 499d1be71a050c1d83ce97089f15d280acc05bf2..e7ff6c5225a33c5dbe95e89de0c6b6d05907fbdf 100644
--- a/ppcls/modeling/architectures/xception.py
+++ b/ppcls/modeling/architectures/xception.py
@@ -6,6 +6,7 @@ from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
 from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
 from paddle.nn.initializer import Uniform
 import math
+import sys
 
 __all__ = ['Xception41', 'Xception65', 'Xception71']