Unverified commit c09fe142, authored by fuqianya on Oct 27, 2021, committed via GitHub on Oct 27, 2021.

[PaddlePaddle Hackathon] add DenseNet (#36069)

* add DenseNet

Parent: 737992eb
Changes: 5 changed files with 451 additions and 1 deletion (+451 −1)
python/paddle/tests/test_pretrained_model.py    +1   −1
python/paddle/tests/test_vision_models.py       +15  −0
python/paddle/vision/__init__.py                +6   −0
python/paddle/vision/models/__init__.py         +12  −0
python/paddle/vision/models/densenet.py         +417 −0
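Taken together, the commit registers a DenseNet implementation plus five depth-specific constructors under paddle.vision.models. A minimal usage sketch, assuming a Paddle build that contains this commit (ImageNet weights are only downloaded when pretrained=True):

import paddle
from paddle.vision.models import densenet121

# Randomly initialized 121-layer DenseNet; pass pretrained=True to fetch
# the ImageNet weights listed in model_urls inside densenet.py below.
model = densenet121()

x = paddle.rand([1, 3, 224, 224])
out = model(x)
print(out.shape)  # expected: [1, 1000]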
python/paddle/tests/test_pretrained_model.py (+1 −1)

@@ -54,7 +54,7 @@ class TestPretrainedModel(unittest.TestCase):
     def test_models(self):
         arches = [
             'mobilenet_v1', 'mobilenet_v2', 'resnet18', 'vgg16', 'alexnet',
-            'resnext50_32x4d', 'inception_v3'
+            'resnext50_32x4d', 'inception_v3', 'densenet121'
         ]
         for arch in arches:
             self.infer(arch)
python/paddle/tests/test_vision_models.py (+15 −0)

@@ -70,6 +70,21 @@ class TestVisonModels(unittest.TestCase):
     def test_resnet152(self):
         self.models_infer('resnet152')
 
+    def test_densenet121(self):
+        self.models_infer('densenet121')
+
+    def test_densenet161(self):
+        self.models_infer('densenet161')
+
+    def test_densenet169(self):
+        self.models_infer('densenet169')
+
+    def test_densenet201(self):
+        self.models_infer('densenet201')
+
+    def test_densenet264(self):
+        self.models_infer('densenet264')
+
     def test_alexnet(self):
         self.models_infer('alexnet')
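The new cases reuse the test class's existing models_infer helper, whose body is outside this diff. A rough standalone equivalent of what each added case exercises might look like the following (a sketch under that assumption):

import paddle
from paddle.vision import models

# One forward pass per newly covered architecture, mirroring the pattern of
# the added test_densenet* cases. The real tests go through the shared
# models_infer helper; its exact checks are assumed, not shown in this diff.
for arch in ['densenet121', 'densenet161', 'densenet169',
             'densenet201', 'densenet264']:
    net = getattr(models, arch)()
    x = paddle.rand([1, 3, 224, 224])
    y = net(x)
    assert y.shape == [1, 1000]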
python/paddle/vision/__init__.py (+6 −0)

@@ -44,6 +44,12 @@ from .models import vgg13  # noqa: F401
 from .models import vgg16  # noqa: F401
 from .models import vgg19  # noqa: F401
 from .models import LeNet  # noqa: F401
+from .models import DenseNet  # noqa: F401
+from .models import densenet121  # noqa: F401
+from .models import densenet161  # noqa: F401
+from .models import densenet169  # noqa: F401
+from .models import densenet201  # noqa: F401
+from .models import densenet264  # noqa: F401
 from .models import AlexNet  # noqa: F401
 from .models import alexnet  # noqa: F401
 from .models import ResNeXt  # noqa: F401
python/paddle/vision/models/__init__.py (+12 −0)

@@ -28,6 +28,12 @@ from .vgg import vgg13  # noqa: F401
 from .vgg import vgg16  # noqa: F401
 from .vgg import vgg19  # noqa: F401
 from .lenet import LeNet  # noqa: F401
+from .densenet import DenseNet  # noqa: F401
+from .densenet import densenet121  # noqa: F401
+from .densenet import densenet161  # noqa: F401
+from .densenet import densenet169  # noqa: F401
+from .densenet import densenet201  # noqa: F401
+from .densenet import densenet264  # noqa: F401
 from .alexnet import AlexNet  # noqa: F401
 from .alexnet import alexnet  # noqa: F401
 from .resnext import ResNeXt  # noqa: F401

@@ -57,6 +63,12 @@ __all__ = [ #noqa
     'MobileNetV2',
     'mobilenet_v2',
     'LeNet',
+    'DenseNet',
+    'densenet121',
+    'densenet161',
+    'densenet169',
+    'densenet201',
+    'densenet264',
     'AlexNet',
     'alexnet',
     'ResNeXt',
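With both __init__.py files updated, the new symbols are reachable from either namespace. A quick check (sketch; names taken directly from the two hunks above):

# Re-exported at the package level by python/paddle/vision/__init__.py ...
from paddle.vision import densenet121
# ... and collected into __all__ by python/paddle/vision/models/__init__.py.
from paddle.vision.models import DenseNet, densenet264

import paddle
# Both import paths point at the same function object.
assert paddle.vision.densenet121 is paddle.vision.models.densenet121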
python/paddle/vision/models/densenet.py (new file, +417 −0)

# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
from paddle.fluid.param_attr import ParamAttr
from paddle.utils.download import get_weights_path_from_url

__all__ = []

model_urls = {
    'densenet121':
    ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams',
     'db1b239ed80a905290fd8b01d3af08e4'),
    'densenet161':
    ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams',
     '62158869cb315098bd25ddbfd308a853'),
    'densenet169':
    ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams',
     '82cc7c635c3f19098c748850efb2d796'),
    'densenet201':
    ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams',
     '16ca29565a7712329cf9e36e02caaf58'),
    'densenet264':
    ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams',
     '3270ce516b85370bba88cfdd9f60bff4'),
}
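
# Each model_urls entry pairs a download URL with what appears to be an MD5
# checksum; _densenet() below passes both to get_weights_path_from_url when
# pretrained weights are requested.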

class BNACConvLayer(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 pad=0,
                 groups=1,
                 act="relu"):
        super(BNACConvLayer, self).__init__()
        self._batch_norm = BatchNorm(num_channels, act=act)

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=pad,
            groups=groups,
            weight_attr=ParamAttr(),
            bias_attr=False)

    def forward(self, input):
        y = self._batch_norm(input)
        y = self._conv(y)
        return y

class DenseLayer(nn.Layer):
    def __init__(self, num_channels, growth_rate, bn_size, dropout):
        super(DenseLayer, self).__init__()
        self.dropout = dropout

        self.bn_ac_func1 = BNACConvLayer(
            num_channels=num_channels,
            num_filters=bn_size * growth_rate,
            filter_size=1,
            pad=0,
            stride=1)

        self.bn_ac_func2 = BNACConvLayer(
            num_channels=bn_size * growth_rate,
            num_filters=growth_rate,
            filter_size=3,
            pad=1,
            stride=1)

        if dropout:
            self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer")

    def forward(self, input):
        conv = self.bn_ac_func1(input)
        conv = self.bn_ac_func2(conv)
        if self.dropout:
            conv = self.dropout_func(conv)
        conv = paddle.concat([input, conv], axis=1)
        return conv
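
# DenseBlock chains num_layers DenseLayer instances; every layer concatenates
# its growth_rate new channels onto its input, so the channel count handed to
# the next layer (pre_channel) grows by growth_rate per iteration.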
class DenseBlock(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_layers,
                 bn_size,
                 growth_rate,
                 dropout,
                 name=None):
        super(DenseBlock, self).__init__()
        self.dropout = dropout
        self.dense_layer_func = []

        pre_channel = num_channels
        for layer in range(num_layers):
            self.dense_layer_func.append(
                self.add_sublayer(
                    "{}_{}".format(name, layer + 1),
                    DenseLayer(
                        num_channels=pre_channel,
                        growth_rate=growth_rate,
                        bn_size=bn_size,
                        dropout=dropout)))
            pre_channel = pre_channel + growth_rate

    def forward(self, input):
        conv = input
        for func in self.dense_layer_func:
            conv = func(conv)
        return conv

class TransitionLayer(nn.Layer):
    def __init__(self, num_channels, num_output_features):
        super(TransitionLayer, self).__init__()

        self.conv_ac_func = BNACConvLayer(
            num_channels=num_channels,
            num_filters=num_output_features,
            filter_size=1,
            pad=0,
            stride=1)

        self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0)

    def forward(self, input):
        y = self.conv_ac_func(input)
        y = self.pool2d_avg(y)
        return y

class ConvBNLayer(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 pad=0,
                 groups=1,
                 act="relu"):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=pad,
            groups=groups,
            weight_attr=ParamAttr(),
            bias_attr=False)
        self._batch_norm = BatchNorm(num_filters, act=act)

    def forward(self, input):
        y = self._conv(input)
        y = self._batch_norm(y)
        return y

class DenseNet(nn.Layer):
    """DenseNet model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        layers (int): layers of densenet. Default: 121.
        bn_size (int): expansion of growth rate in the middle layer. Default: 4.
        dropout (float): dropout rate. Default: 0..
        num_classes (int): output dim of last fc layer. Default: 1000.
        with_pool (bool): use pool before the last fc layer or not. Default: True.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import DenseNet

            # build model
            densenet = DenseNet()

            x = paddle.rand([1, 3, 224, 224])
            out = densenet(x)

            print(out.shape)
    """

    def __init__(self,
                 layers=121,
                 bn_size=4,
                 dropout=0.,
                 num_classes=1000,
                 with_pool=True):
        super(DenseNet, self).__init__()
        self.num_classes = num_classes
        self.with_pool = with_pool

        supported_layers = [121, 161, 169, 201, 264]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        densenet_spec = {
            121: (64, 32, [6, 12, 24, 16]),
            161: (96, 48, [6, 12, 36, 24]),
            169: (64, 32, [6, 12, 32, 32]),
            201: (64, 32, [6, 12, 48, 32]),
            264: (64, 32, [6, 12, 64, 48])
        }
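        # densenet_spec: depth -> (num_init_features, growth_rate, block_config),
        # unpacked just below to size the stem and the four dense blocks.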
        num_init_features, growth_rate, block_config = densenet_spec[layers]

        self.conv1_func = ConvBNLayer(
            num_channels=3,
            num_filters=num_init_features,
            filter_size=7,
            stride=2,
            pad=3,
            act='relu')

        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.block_config = block_config

        self.dense_block_func_list = []
        self.transition_func_list = []
        pre_num_channels = num_init_features
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.dense_block_func_list.append(
                self.add_sublayer(
                    "db_conv_{}".format(i + 2),
                    DenseBlock(
                        num_channels=pre_num_channels,
                        num_layers=num_layers,
                        bn_size=bn_size,
                        growth_rate=growth_rate,
                        dropout=dropout,
                        name='conv' + str(i + 2))))

            num_features = num_features + num_layers * growth_rate
            pre_num_channels = num_features

            if i != len(block_config) - 1:
                self.transition_func_list.append(
                    self.add_sublayer(
                        "tr_conv{}_blk".format(i + 2),
                        TransitionLayer(
                            num_channels=pre_num_channels,
                            num_output_features=num_features // 2)))
                pre_num_channels = num_features // 2
                num_features = num_features // 2

        self.batch_norm = BatchNorm(num_features, act="relu")

        if self.with_pool:
            self.pool2d_avg = AdaptiveAvgPool2D(1)

        if self.num_classes > 0:
            stdv = 1.0 / math.sqrt(num_features * 1.0)
            self.out = Linear(
                num_features,
                num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr())

    def forward(self, input):
        conv = self.conv1_func(input)
        conv = self.pool2d_max(conv)

        for i, num_layers in enumerate(self.block_config):
            conv = self.dense_block_func_list[i](conv)
            if i != len(self.block_config) - 1:
                conv = self.transition_func_list[i](conv)

        conv = self.batch_norm(conv)

        if self.with_pool:
            y = self.pool2d_avg(conv)

        if self.num_classes > 0:
            y = paddle.flatten(y, start_axis=1, stop_axis=-1)
            y = self.out(y)

        return y

def _densenet(arch, layers, pretrained, **kwargs):
    model = DenseNet(layers=layers, **kwargs)
    if pretrained:
        assert arch in model_urls, \
            "{} model do not have a pretrained model now, you should set pretrained=False".format(
                arch)
        weight_path = get_weights_path_from_url(model_urls[arch][0],
                                                model_urls[arch][1])

        param = paddle.load(weight_path)
        model.set_dict(param)

    return model

def densenet121(pretrained=False, **kwargs):
    """DenseNet 121-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Examples:
        .. code-block:: python

            from paddle.vision.models import densenet121

            # build model
            model = densenet121()

            # build model and load imagenet pretrained weight
            # model = densenet121(pretrained=True)
    """
    return _densenet('densenet121', 121, pretrained, **kwargs)


def densenet161(pretrained=False, **kwargs):
    """DenseNet 161-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Examples:
        .. code-block:: python

            from paddle.vision.models import densenet161

            # build model
            model = densenet161()

            # build model and load imagenet pretrained weight
            # model = densenet161(pretrained=True)
    """
    return _densenet('densenet161', 161, pretrained, **kwargs)


def densenet169(pretrained=False, **kwargs):
    """DenseNet 169-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Examples:
        .. code-block:: python

            from paddle.vision.models import densenet169

            # build model
            model = densenet169()

            # build model and load imagenet pretrained weight
            # model = densenet169(pretrained=True)
    """
    return _densenet('densenet169', 169, pretrained, **kwargs)


def densenet201(pretrained=False, **kwargs):
    """DenseNet 201-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Examples:
        .. code-block:: python

            from paddle.vision.models import densenet201

            # build model
            model = densenet201()

            # build model and load imagenet pretrained weight
            # model = densenet201(pretrained=True)
    """
    return _densenet('densenet201', 201, pretrained, **kwargs)


def densenet264(pretrained=False, **kwargs):
    """DenseNet 264-layer model

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Examples:
        .. code-block:: python

            from paddle.vision.models import densenet264

            # build model
            model = densenet264()

            # build model and load imagenet pretrained weight
            # model = densenet264(pretrained=True)
    """
    return _densenet('densenet264', 264, pretrained, **kwargs)
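
The constructor's channel bookkeeping can be replayed outside the model. The sketch below recomputes, for the densenet121 spec, how many feature channels each dense block emits and how the transition layers halve them, matching the num_features / pre_num_channels arithmetic in DenseNet.__init__:

# Plain-Python replay of the channel arithmetic for the 121-layer spec:
# (num_init_features=64, growth_rate=32, block_config=[6, 12, 24, 16]).
num_features = 64
growth_rate = 32
block_config = [6, 12, 24, 16]

for i, num_layers in enumerate(block_config):
    num_features += num_layers * growth_rate       # after the dense block
    print("block", i + 2, "->", num_features, "channels")
    if i != len(block_config) - 1:
        num_features //= 2                          # transition layer halves it
print("final BatchNorm width:", num_features)       # 1024 for densenet121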