PaddlePaddle / hapi

Commit 43ec4521
Authored Apr 04, 2020 by LielinJiang
Parent: e51b938b

add docs

Showing 6 changed files with 82 additions and 404 deletions (+82, -404)
image_classification/main.py   +8   -2
models/mobilenet.py            +0   -283
models/mobilenetv1.py          +11  -50
models/mobilenetv2.py          +17  -52
models/resnet.py               +34  -0
models/vgg.py                  +12  -17
image_classification/main.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...
@@ -42,7 +41,8 @@ def make_optimizer(step_per_epoch, parameter_list=None):
     weight_decay = FLAGS.weight_decay
     if lr_scheduler == 'piecewise':
-        boundaries = [step_per_epoch * e for e in [30, 60, 80]]
+        milestones = FLAGS.milestones
+        boundaries = [step_per_epoch * e for e in milestones]
         values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
         learning_rate = fluid.layers.piecewise_decay(
             boundaries=boundaries, values=values)
...
@@ -155,6 +155,12 @@ if __name__ == '__main__':
         default='piecewise',
         type=str,
         help="learning rate scheduler")
+    parser.add_argument(
+        "--milestones",
+        nargs='+',
+        type=int,
+        default=[30, 60, 80],
+        help="piecewise decay milestones")
     parser.add_argument(
         "--weight-decay", default=1e-4, type=float, help="weight decay")
     parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
...
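The first hunk stops hard-coding the piecewise decay epochs and instead reads them from the new --milestones flag (one or more integers, default [30, 60, 80]) added in the second hunk. Below is a standalone sketch of the schedule this produces; step_per_epoch and base_lr are invented for illustration and are not values taken from the diff.

# Standalone illustration only (not repository code): the piecewise schedule
# that make_optimizer builds from FLAGS.milestones.
step_per_epoch = 5000                  # hypothetical iterations per epoch
base_lr = 0.1                          # hypothetical base learning rate
milestones = [30, 60, 80]              # default of the new --milestones flag

boundaries = [step_per_epoch * e for e in milestones]
values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]

print(boundaries)  # [150000, 300000, 400000]
print(values)      # [0.1, 0.01, 0.001, 0.0001], up to floating-point rounding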
models/mobilenet.py (deleted, 100644 → 0; the entire file below was removed)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import math
import sys
import numpy as np
import argparse

import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

from model import Model
from .download import get_weights_path

__all__ = [
    'MobileNetV2', 'mobilnetv2_x0_25', 'mobilnetv2_x0_5', 'mobilnetv2_x0_75',
    'mobilnetv2_x1_0', 'mobilnetv2_x1_25', 'mobilnetv2_x1_5',
    'mobilnetv2_x1_75', 'mobilnetv2_x2_0'
]

model_urls = {}


class ConvBNLayer(fluid.dygraph.Layer):
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 channels=None,
                 num_groups=1,
                 use_cudnn=True):
        super(ConvBNLayer, self).__init__()

        tmp_param = ParamAttr(name=self.full_name() + "_weights")
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=tmp_param,
            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            param_attr=ParamAttr(name=self.full_name() + "_bn" + "_scale"),
            bias_attr=ParamAttr(name=self.full_name() + "_bn" + "_offset"),
            moving_mean_name=self.full_name() + "_bn" + '_mean',
            moving_variance_name=self.full_name() + "_bn" + '_variance')

    def forward(self, inputs, if_act=True):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if if_act:
            y = fluid.layers.relu6(y)
        return y


class InvertedResidualUnit(fluid.dygraph.Layer):
    def __init__(self,
                 num_channels,
                 num_in_filter,
                 num_filters,
                 stride,
                 filter_size,
                 padding,
                 expansion_factor):
        super(InvertedResidualUnit, self).__init__()
        num_expfilter = int(round(num_in_filter * expansion_factor))
        self._expand_conv = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_expfilter,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1)

        self._bottleneck_conv = ConvBNLayer(
            num_channels=num_expfilter,
            num_filters=num_expfilter,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            num_groups=num_expfilter,
            use_cudnn=False)

        self._linear_conv = ConvBNLayer(
            num_channels=num_expfilter,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1)

    def forward(self, inputs, ifshortcut):
        y = self._expand_conv(inputs, if_act=True)
        y = self._bottleneck_conv(y, if_act=True)
        y = self._linear_conv(y, if_act=False)
        if ifshortcut:
            y = fluid.layers.elementwise_add(inputs, y)
        return y


class InvresiBlocks(fluid.dygraph.Layer):
    def __init__(self, in_c, t, c, n, s):
        super(InvresiBlocks, self).__init__()

        self._first_block = InvertedResidualUnit(
            num_channels=in_c,
            num_in_filter=in_c,
            num_filters=c,
            stride=s,
            filter_size=3,
            padding=1,
            expansion_factor=t)

        self._inv_blocks = []
        for i in range(1, n):
            tmp = self.add_sublayer(
                sublayer=InvertedResidualUnit(
                    num_channels=c,
                    num_in_filter=c,
                    num_filters=c,
                    stride=1,
                    filter_size=3,
                    padding=1,
                    expansion_factor=t),
                name=self.full_name() + "_" + str(i + 1))
            self._inv_blocks.append(tmp)

    def forward(self, inputs):
        y = self._first_block(inputs, ifshortcut=False)
        for inv_block in self._inv_blocks:
            y = inv_block(y, ifshortcut=True)
        return y


class MobileNetV2(Model):
    def __init__(self, class_dim=1000, scale=1.0):
        super(MobileNetV2, self).__init__()
        self.scale = scale
        self.class_dim = class_dim

        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        #1. conv1
        self._conv1 = ConvBNLayer(
            num_channels=3,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1)

        #2. bottleneck sequences
        self._invl = []
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            tmp = self.add_sublayer(
                sublayer=InvresiBlocks(
                    in_c=in_c, t=t, c=int(c * scale), n=n, s=s),
                name='conv' + str(i))
            self._invl.append(tmp)
            in_c = int(c * scale)

        #3. last_conv
        self._out_c = int(1280 * scale) if scale > 1.0 else 1280
        self._conv9 = ConvBNLayer(
            num_channels=in_c,
            num_filters=self._out_c,
            filter_size=1,
            stride=1,
            padding=0)

        #4. pool
        self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        #5. fc
        tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
        self._fc = Linear(
            self._out_c,
            class_dim,
            act='softmax',
            param_attr=tmp_param,
            bias_attr=ParamAttr(name="fc10_offset"))

    def forward(self, inputs):
        y = self._conv1(inputs, if_act=True)
        for inv in self._invl:
            y = inv(y)
        y = self._conv9(y, if_act=True)
        y = self._pool2d_avg(y)
        y = fluid.layers.reshape(y, shape=[-1, self._out_c])
        y = self._fc(y)
        return y


def _mobilenet(arch, pretrained=False, **kwargs):
    model = MobileNetV2(**kwargs)
    if pretrained:
        assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format(
            arch)
        weight_path = get_weights_path(model_urls[arch][0],
                                       model_urls[arch][1])
        assert weight_path.endswith(
            '.pdparams'), "suffix of weight must be .pdparams"
        model.load(weight_path[:-9])

    return model


def mobilnetv2_x1_0(pretrained=False):
    model = _mobilenet('mobilenetv2_1.0', pretrained, scale=1.0)
    return model


def mobilnetv2_x0_25(pretrained=False):
    model = _mobilenet('mobilenetv2_0.25', pretrained, scale=0.25)
    return model


def mobilnetv2_x0_5(pretrained=False):
    model = _mobilenet('mobilenetv2_0.5', pretrained, scale=0.5)
    return model


def mobilnetv2_x0_75(pretrained=False):
    model = _mobilenet('mobilenetv2_0.75', pretrained, scale=0.75)
    return model


def mobilnetv2_x1_25(pretrained=False):
    model = _mobilenet('mobilenetv2_1.25', pretrained, scale=1.25)
    return model


def mobilnetv2_x1_5(pretrained=False):
    model = _mobilenet('mobilenetv2_1.5', pretrained, scale=1.5)
    return model


def mobilnetv2_x1_75(pretrained=False):
    model = _mobilenet('mobilenetv2_1.75', pretrained, scale=1.75)
    return model


def mobilnetv2_x2_0(pretrained=False):
    model = _mobilenet('mobilenetv2_2.0', pretrained, scale=2.0)
    return model
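The removed file essentially duplicated the MobileNetV2 implementation kept in models/mobilenetv2.py (updated below). For reference, here is a small framework-free sketch of how its bottleneck_params_list drives the block structure; this helper is illustrative only and not part of the repository.

# Illustrative helper (not repository code): how the (t, c, n, s) tuples above
# expand into individual inverted-residual units for a width multiplier `scale`.
BOTTLENECK_PARAMS = [
    (1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2), (6, 64, 4, 2),
    (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1),
]

def expand_units(scale=1.0):
    """Yield (in_channels, out_channels, stride, expansion_factor) per unit."""
    in_c = int(32 * scale)                 # output of the stem conv (_conv1)
    for t, c, n, s in BOTTLENECK_PARAMS:
        out_c = int(c * scale)
        for unit in range(n):
            # only the first unit of each stage uses the configured stride;
            # the remaining n - 1 units keep stride 1 and add a shortcut
            yield in_c, out_c, (s if unit == 0 else 1), t
            in_c = out_c

if __name__ == '__main__':
    for cfg in expand_units(scale=1.0):
        print(cfg)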
models/mobilenetv1.py

...
@@ -12,29 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import time
-import sys
-import math
 import numpy as np
-import argparse

 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid import framework

 from model import Model
 from .download import get_weights_path

-__all__ = [
-    'MobileNetV1', 'mobilnetv1_x0_25', 'mobilnetv1_x0_5', 'mobilnetv1_x0_75',
-    'mobilnetv1_x1_0', 'mobilnetv1_x1_25', 'mobilnetv1_x1_5',
-    'mobilnetv1_x1_75', 'mobilnetv1_x2_0'
-]
+__all__ = ['MobileNetV1', 'mobilenet_v1']

 model_urls = {}
...
@@ -114,6 +102,14 @@ class DepthwiseSeparable(fluid.dygraph.Layer):

 class MobileNetV1(Model):
+    """MobileNetV1 model from
+    `"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" <https://arxiv.org/abs/1704.04861>`_.
+
+    Args:
+        scale (float): scale of channels in each layer. Default: 1.0.
+        class_dim (int): output dim of last fc layer. Default: 1000.
+    """
+
     def __init__(self, scale=1.0, class_dim=1000):
         super(MobileNetV1, self).__init__()
         self.scale = scale
...
@@ -261,41 +257,6 @@ def _mobilenet(arch, pretrained=False, **kwargs):
     return model


-def mobilnetv1_x1_0(pretrained=False):
-    model = _mobilenet('mobilenetv1_1.0', pretrained, scale=1.0)
-    return model
-
-
-def mobilnetv1_x0_25(pretrained=False):
-    model = _mobilenet('mobilenetv1_0.25', pretrained, scale=0.25)
-    return model
-
-
-def mobilnetv1_x0_5(pretrained=False):
-    model = _mobilenet('mobilenetv1_0.5', pretrained, scale=0.5)
-    return model
-
-
-def mobilnetv1_x0_75(pretrained=False):
-    model = _mobilenet('mobilenetv1_0.75', pretrained, scale=0.75)
-    return model
-
-
-def mobilnetv1_x1_25(pretrained=False):
-    model = _mobilenet('mobilenetv1_1.25', pretrained, scale=1.25)
-    return model
-
-
-def mobilnetv1_x1_5(pretrained=False):
-    model = _mobilenet('mobilenetv1_1.5', pretrained, scale=1.5)
-    return model
-
-
-def mobilnetv1_x1_75(pretrained=False):
-    model = _mobilenet('mobilenetv1_1.75', pretrained, scale=1.75)
-    return model
-
-
-def mobilnetv1_x2_0(pretrained=False):
-    model = _mobilenet('mobilenetv1_2.0', pretrained, scale=2.0)
-    return model
+def mobilenet_v1(pretrained=False, scale=1.0):
+    model = _mobilenet('mobilenetv1_' + str(scale), pretrained, scale=scale)
+    return model
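The eight per-width helpers (mobilnetv1_x0_25 through mobilnetv1_x2_0) collapse into a single mobilenet_v1 constructor parameterized by scale. A hedged usage sketch, assuming hapi's repository layout is on the import path and PaddlePaddle with dygraph support is installed; model_urls is still empty, so pretrained has to stay False:

# Illustrative usage only; the import path assumes the hapi repository layout.
from models.mobilenetv1 import mobilenet_v1

net = mobilenet_v1(pretrained=False, scale=0.5)   # was mobilnetv1_x0_5() before this commit
full = mobilenet_v1()                             # scale defaults to 1.0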
models/mobilenetv2.py

...
@@ -12,29 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import time
-import math
-import sys
 import numpy as np
-import argparse

 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid import framework

 from model import Model
 from .download import get_weights_path

-__all__ = [
-    'MobileNetV2', 'mobilnetv2_x0_25', 'mobilnetv2_x0_5', 'mobilnetv2_x0_75',
-    'mobilnetv2_x1_0', 'mobilnetv2_x1_25', 'mobilnetv2_x1_5',
-    'mobilnetv2_x1_75', 'mobilnetv2_x2_0'
-]
+__all__ = ['MobileNetV2', 'mobilenet_v2']

 model_urls = {}
...
@@ -160,7 +147,15 @@ class InvresiBlocks(fluid.dygraph.Layer):

 class MobileNetV2(Model):
-    def __init__(self, class_dim=1000, scale=1.0):
+    """MobileNetV2 model from
+    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
+
+    Args:
+        scale (float): scale of channels in each layer. Default: 1.0.
+        class_dim (int): output dim of last fc layer. Default: 1000.
+    """
+
+    def __init__(self, scale=1.0, class_dim=1000):
         super(MobileNetV2, self).__init__()
         self.scale = scale
         self.class_dim = class_dim
...
@@ -243,41 +238,11 @@ def _mobilenet(arch, pretrained=False, **kwargs):
     return model


-def mobilnetv2_x1_0(pretrained=False):
-    model = _mobilenet('mobilenetv2_1.0', pretrained, scale=1.0)
-    return model
-
-
-def mobilnetv2_x0_25(pretrained=False):
-    model = _mobilenet('mobilenetv2_0.25', pretrained, scale=0.25)
-    return model
-
-
-def mobilnetv2_x0_5(pretrained=False):
-    model = _mobilenet('mobilenetv2_0.5', pretrained, scale=0.5)
-    return model
-
-
-def mobilnetv2_x0_75(pretrained=False):
-    model = _mobilenet('mobilenetv2_0.75', pretrained, scale=0.75)
-    return model
-
-
-def mobilnetv2_x1_25(pretrained=False):
-    model = _mobilenet('mobilenetv2_1.25', pretrained, scale=1.25)
-    return model
-
-
-def mobilnetv2_x1_5(pretrained=False):
-    model = _mobilenet('mobilenetv2_1.5', pretrained, scale=1.5)
-    return model
-
-
-def mobilnetv2_x1_75(pretrained=False):
-    model = _mobilenet('mobilenetv2_1.75', pretrained, scale=1.75)
-    return model
-
-
-def mobilnetv2_x2_0(pretrained=False):
-    model = _mobilenet('mobilenetv2_2.0', pretrained, scale=2.0)
-    return model
+def mobilenet_v2(pretrained=False, scale=1.0):
+    """MobileNetV2
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = _mobilenet('mobilenetv2_' + str(scale), pretrained, scale=scale)
+    return model
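models/mobilenetv2.py gets the same treatment: one documented mobilenet_v2 entry point replaces the eight mobilnetv2_x* helpers. A hedged sketch under the same assumptions as the MobileNetV1 example (repository layout importable, pretrained=False because model_urls is empty):

# Illustrative usage only; the arch name passed to _mobilenet is now built as
# 'mobilenetv2_' + str(scale), e.g. 'mobilenetv2_0.75'.
from models.mobilenetv2 import mobilenet_v2

net = mobilenet_v2(pretrained=False, scale=0.75)  # was mobilnetv2_x0_75() before this commit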
models/resnet.py

...
@@ -157,6 +157,15 @@ class BottleneckBlock(fluid.dygraph.Layer):

 class ResNet(Model):
+    """ResNet model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        Block (BasicBlock|BottleneckBlock): block module of model.
+        depth (int): layers of resnet, default: 50.
+        num_classes (int): output dim of last fc layer, default: 1000.
+    """
+
     def __init__(self, Block, depth=50, num_classes=1000):
         super(ResNet, self).__init__()
...
@@ -240,20 +249,45 @@ def _resnet(arch, Block, depth, pretrained):

 def resnet18(pretrained=False):
+    """ResNet 18-layer model
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
     return _resnet('resnet18', BasicBlock, 18, pretrained)


 def resnet34(pretrained=False):
+    """ResNet 34-layer model
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
     return _resnet('resnet34', BasicBlock, 34, pretrained)


 def resnet50(pretrained=False):
+    """ResNet 50-layer model
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
     return _resnet('resnet50', BottleneckBlock, 50, pretrained)


 def resnet101(pretrained=False):
+    """ResNet 101-layer model
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
     return _resnet('resnet101', BottleneckBlock, 101, pretrained)


 def resnet152(pretrained=False):
+    """ResNet 152-layer model
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
     return _resnet('resnet152', BottleneckBlock, 152, pretrained)
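The resnet.py change is documentation only: a class docstring for ResNet plus one per constructor. The depth-to-block mapping is unchanged, as in this hedged sketch (assuming the models package is importable and PaddlePaddle is installed):

# Illustrative usage only.
from models.resnet import resnet18, resnet50

small = resnet18(pretrained=False)   # BasicBlock, 18 layers
large = resnet50(pretrained=False)   # BottleneckBlock, 50 layers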
models/vgg.py

...
@@ -12,21 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import time
-import math
-import sys
-import numpy as np
-import argparse

 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.initializer import MSRA
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
 from paddle.fluid.dygraph.container import Sequential
-from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid import framework

 from model import Model
 from .download import get_weights_path
...
@@ -65,7 +54,15 @@ class Classifier(fluid.dygraph.Layer):

 class VGG(Model):
-    def __init__(self, features, num_classes=1000, init_weights=True):
+    """VGG model from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        features (fluid.dygraph.Layer): vgg features create by function make_layers.
+        num_classes (int): output dim of last fc layer. Default: 1000.
+    """
+
+    def __init__(self, features, num_classes=1000):
         super(VGG, self).__init__()
         self.features = features
         classifier = Classifier(num_classes)
...
@@ -74,9 +71,7 @@ class VGG(Model):
     def forward(self, x):
         x = self.features(x)
-        # x = fluid.layers.adaptive_pool2d(x, pool_size=(7, 7), pool_type='avg')
-        x = fluid.layers.flatten(x, 1)
+        # x = fluid.layers.flatten(x, 1)
+        x = fluid.layers.reshape(x, [-1, 7 * 7 * 512])
         x = self.classifier(x)
         return x
...
@@ -116,9 +111,8 @@ cfgs = {
 def _vgg(arch, cfg, batch_norm, pretrained, **kwargs):
-    if pretrained:
-        kwargs['init_weights'] = False
-
     model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)

     if pretrained:
         assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format(
             arch)
...
@@ -127,6 +121,7 @@ def _vgg(arch, cfg, batch_norm, pretrained, **kwargs):
         assert weight_path.endswith(
             '.pdparams'), "suffix of weight must be .pdparams"
         model.load(weight_path[:-9])

     return model
...
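Both variants of the flatten step in the forward hunk above (fluid.layers.flatten(x, 1) and the explicit reshape to [-1, 7 * 7 * 512]) produce the same shape for the conventional 224x224 VGG input. A quick arithmetic check with the usual VGG constants; these numbers are standard for the architecture, not values read from this diff:

# Sanity check of the hard-coded reshape size (illustrative, not repository code).
input_hw = 224          # conventional ImageNet crop for VGG
pool_stages = 5         # each VGG configuration ends after five 2x2 max-pools
channels = 512          # channel count of the last conv block
feature_hw = input_hw // (2 ** pool_stages)

assert feature_hw == 7
print(feature_hw * feature_hw * channels)   # 25088 == 7 * 7 * 512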