Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
lvpchen1
handpose_x
提交
501afd5d
handpose_x
项目概览
lvpchen1
/
handpose_x
与 Fork 源项目一致
Fork自
DataBall / handpose_x
通知
3
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
handpose_x
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
501afd5d
编写于
3月 16, 2021
作者:
DataBall
🚴🏻
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add rexnetv1 model
上级
da996fb9
变更
2
隐藏空白更改
内联
并排
Showing 2 changed files with 198 additions and 2 deletions
+198
-2
models/rexnetv1.py
models/rexnetv1.py
+183
-0
train.py
train.py
+15
-2
未找到文件。
models/rexnetv1.py
0 → 100644
浏览文件 @
501afd5d
"""
ReXNet
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import
torch
import
torch.nn
as
nn
from
math
import
ceil
# Memory-efficient Swish using torch.jit.script, borrowed from
# https://twitter.com/jeremyphoward/status/1188251041835315200
# The memory-efficient Swish is currently used by default:
USE_MEMORY_EFFICIENT_SWISH = True

if USE_MEMORY_EFFICIENT_SWISH:
    @torch.jit.script
    def swish_fwd(x):
        """Forward pass of Swish: x * sigmoid(x)."""
        return x * torch.sigmoid(x)

    @torch.jit.script
    def swish_bwd(x, grad_output):
        """Backward pass of Swish; recomputes sigmoid(x) instead of storing it."""
        sig = torch.sigmoid(x)
        return grad_output * (sig * (1. + x * (1. - sig)))

    class SwishJitImplementation(torch.autograd.Function):
        """Autograd wrapper that saves only the input for backward,
        trading a sigmoid recomputation for lower activation memory."""

        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return swish_fwd(x)

        @staticmethod
        def backward(ctx, grad_output):
            saved_input, = ctx.saved_tensors
            return swish_bwd(saved_input, grad_output)

    def swish(x, inplace=False):
        # NOTE(review): `inplace` is accepted for API compatibility but has
        # no effect in this memory-efficient branch.
        return SwishJitImplementation.apply(x)

else:
    def swish(x, inplace=False):
        """Plain Swish; mutates x in place when `inplace` is True."""
        if inplace:
            return x.mul_(x.sigmoid())
        return x.mul(x.sigmoid())
class Swish(nn.Module):
    """Module wrapper around the functional `swish` activation."""

    def __init__(self, inplace=True):
        super(Swish, self).__init__()
        # Forwarded to the functional `swish`; honored only by the
        # non-jit implementation.
        self.inplace = inplace

    def forward(self, x):
        return swish(x, self.inplace)
def ConvBNAct(out, in_channels, channels, kernel=1, stride=1, pad=0,
              num_group=1, active=True, relu6=False):
    """Append a Conv2d -> BatchNorm2d (-> activation) sequence to `out`.

    Mutates the `out` list in place and returns None. The conv has no bias
    (BatchNorm follows). When `active` is True, appends ReLU6 if `relu6`
    else ReLU, both in-place.
    """
    out.append(nn.Conv2d(in_channels, channels, kernel, stride, pad,
                         groups=num_group, bias=False))
    out.append(nn.BatchNorm2d(channels))
    if active:
        activation = nn.ReLU6(inplace=True) if relu6 else nn.ReLU(inplace=True)
        out.append(activation)
def ConvBNSwish(out, in_channels, channels, kernel=1, stride=1, pad=0,
                num_group=1):
    """Append a Conv2d -> BatchNorm2d -> Swish sequence to `out` in place."""
    conv = nn.Conv2d(in_channels, channels, kernel, stride, pad,
                     groups=num_group, bias=False)
    out.extend([conv, nn.BatchNorm2d(channels), Swish()])
class SE(nn.Module):
    """Squeeze-and-Excitation block.

    Global-average-pools the input to 1x1, squeezes the channel count by
    `se_ratio`, expands back to `channels`, and gates the input with the
    resulting sigmoid attention map.
    """

    def __init__(self, in_channels, channels, se_ratio=12):
        super(SE, self).__init__()
        squeezed = channels // se_ratio
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, squeezed, kernel_size=1, padding=0),
            nn.BatchNorm2d(squeezed),
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, channels, kernel_size=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        attention = self.fc(self.avg_pool(x))
        return x * attention
class LinearBottleneck(nn.Module):
    """ReXNet inverted-residual block.

    Optional 1x1 expansion by factor `t` (Swish), depthwise 3x3 conv with
    the given `stride`, optional SE gating followed by ReLU6, then a linear
    1x1 projection (no activation). When stride == 1 and the channel count
    does not shrink, the input is added onto the first `in_channels`
    channels of the output (partial identity shortcut).
    """

    def __init__(self, in_channels, channels, t, stride, use_se=True,
                 se_ratio=12, **kwargs):
        super(LinearBottleneck, self).__init__(**kwargs)
        self.use_shortcut = stride == 1 and in_channels <= channels
        self.in_channels = in_channels
        self.out_channels = channels

        blocks = []
        if t != 1:
            dw_channels = in_channels * t
            # 1x1 expansion with Swish activation.
            ConvBNSwish(blocks, in_channels=in_channels, channels=dw_channels)
        else:
            dw_channels = in_channels

        # Depthwise 3x3 (groups == channels); no activation appended here.
        ConvBNAct(blocks, in_channels=dw_channels, channels=dw_channels,
                  kernel=3, stride=stride, pad=1, num_group=dw_channels,
                  active=False)

        if use_se:
            blocks.append(SE(dw_channels, dw_channels, se_ratio))
        blocks.append(nn.ReLU6())

        # Linear 1x1 projection (active=False, so relu6 flag has no effect).
        ConvBNAct(blocks, in_channels=dw_channels, channels=channels,
                  active=False, relu6=True)
        self.out = nn.Sequential(*blocks)

    def forward(self, x):
        y = self.out(x)
        if self.use_shortcut:
            # Partial residual: only the leading `in_channels` output
            # channels receive the identity connection (in-place slice add).
            y[:, 0:self.in_channels] += x
        return y
class ReXNetV1(nn.Module):
    """ReXNet classifier backbone (NAVER Corp., MIT license).

    Builds a stem conv, a linearly-widening stack of LinearBottleneck
    blocks, a 1x1 penultimate conv, global average pooling, and a
    dropout + 1x1-conv classification head.

    Args:
        input_ch: base width of the first bottleneck's output channels.
        final_ch: total channel growth distributed across all blocks.
        width_mult: channel width multiplier.
        depth_mult: per-stage block-count multiplier.
        num_classes: number of output classes.
        use_se: enable Squeeze-and-Excitation in the later stages.
        se_ratio: SE channel-squeeze ratio.
        dropout_factor: dropout probability before the classifier conv.
        bn_momentum: NOTE(review) accepted but never used below.
    """

    def __init__(self, input_ch=16, final_ch=180, width_mult=1.0,
                 depth_mult=1.0, num_classes=1000, use_se=True, se_ratio=12,
                 dropout_factor=0.2, bn_momentum=0.9):
        super(ReXNetV1, self).__init__()

        # Per-stage block counts, first-block strides, and SE usage
        # for the six stages.
        layers = [1, 2, 2, 3, 3, 5]
        strides = [1, 2, 2, 2, 1, 2]
        use_ses = [False, False, True, True, True, True]

        # Scale depth, then flatten per-stage configs into per-block lists:
        # each stage keeps its stride on the first block and stride 1 after.
        layers = [ceil(element * depth_mult) for element in layers]
        strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])
        if use_se:
            use_ses = sum([[element] * layers[idx] for idx, element in enumerate(use_ses)], [])
        else:
            use_ses = [False] * sum(layers[:])
        # Expansion factor t: 1 for the first stage's blocks, 6 elsewhere.
        ts = [1] * layers[0] + [6] * sum(layers[1:])

        # depth counts conv stages; depth // 3 is the number of bottlenecks.
        self.depth = sum(layers[:]) * 3
        # Pre-divide so the later `* width_mult` keeps narrow models sane.
        stem_channel = 32 / width_mult if width_mult < 1.0 else 32
        inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch

        features = []
        in_channels_group = []
        channels_group = []

        # The following channel configuration is a simple instance to make
        # each layer become an expand layer: widths grow linearly by
        # final_ch / num_blocks per block.
        for i in range(self.depth // 3):
            if i == 0:
                in_channels_group.append(int(round(stem_channel * width_mult)))
                channels_group.append(int(round(inplanes * width_mult)))
            else:
                in_channels_group.append(int(round(inplanes * width_mult)))
                inplanes += final_ch / (self.depth // 3 * 1.0)
                channels_group.append(int(round(inplanes * width_mult)))

        # Stem: 3x3 stride-2 conv + BN + Swish on the RGB input.
        ConvBNSwish(features, 3, int(round(stem_channel * width_mult)), kernel=3, stride=2, pad=1)

        for block_idx, (in_c, c, t, s, se) in enumerate(zip(in_channels_group, channels_group, ts, strides, use_ses)):
            features.append(LinearBottleneck(in_channels=in_c, channels=c, t=t, stride=s, use_se=se, se_ratio=se_ratio))

        pen_channels = int(1280 * width_mult)
        # NOTE(review): relies on loop variable `c` (last block's width)
        # leaking out of the for-loop above.
        ConvBNSwish(features, c, pen_channels)

        features.append(nn.AdaptiveAvgPool2d(1))
        self.features = nn.Sequential(*features)
        self.output = nn.Sequential(
            nn.Dropout(dropout_factor),
            nn.Conv2d(pen_channels, num_classes, 1, bias=True))

    def forward(self, x):
        x = self.features(x)
        # NOTE(review): squeeze() drops ALL size-1 dims, so a batch of 1
        # also loses its batch dimension — confirm callers handle this.
        x = self.output(x).squeeze()
        return x
train.py
浏览文件 @
501afd5d
...
@@ -19,6 +19,10 @@ from models.squeezenet import squeezenet1_1,squeezenet1_0
...
@@ -19,6 +19,10 @@ from models.squeezenet import squeezenet1_1,squeezenet1_0
from
models.shufflenetv2
import
ShuffleNetV2
from
models.shufflenetv2
import
ShuffleNetV2
from
models.shufflenet
import
ShuffleNet
from
models.shufflenet
import
ShuffleNet
from
models.mobilenetv2
import
MobileNetV2
from
models.mobilenetv2
import
MobileNetV2
from
models.rexnetv1
import
ReXNetV1
from
torchvision.models
import
shufflenet_v2_x1_5
,
shufflenet_v2_x1_0
,
shufflenet_v2_x2_0
from
loss.loss
import
*
from
loss.loss
import
*
import
cv2
import
cv2
import
time
import
time
...
@@ -49,10 +53,18 @@ def trainer(ops,f_log):
...
@@ -49,10 +53,18 @@ def trainer(ops,f_log):
model_
=
squeezenet1_1
(
pretrained
=
True
,
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
model_
=
squeezenet1_1
(
pretrained
=
True
,
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
elif
ops
.
model
==
"shufflenetv2"
:
elif
ops
.
model
==
"shufflenetv2"
:
model_
=
ShuffleNetV2
(
ratio
=
1.
,
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
model_
=
ShuffleNetV2
(
ratio
=
1.
,
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
elif
ops
.
model
==
"shufflenet_v2_x1_5"
:
model_
=
shufflenet_v2_x1_5
(
pretrained
=
False
,
num_classes
=
ops
.
num_classes
)
elif
ops
.
model
==
"shufflenet_v2_x1_0"
:
model_
=
shufflenet_v2_x1_0
(
pretrained
=
False
,
num_classes
=
ops
.
num_classes
)
elif
ops
.
model
==
"shufflenet_v2_x2_0"
:
model_
=
shufflenet_v2_x2_0
(
pretrained
=
False
,
num_classes
=
ops
.
num_classes
)
elif
ops
.
model
==
"shufflenet"
:
elif
ops
.
model
==
"shufflenet"
:
model_
=
ShuffleNet
(
num_blocks
=
[
2
,
4
,
2
],
num_classes
=
ops
.
num_classes
,
groups
=
3
,
dropout_factor
=
ops
.
dropout
)
model_
=
ShuffleNet
(
num_blocks
=
[
2
,
4
,
2
],
num_classes
=
ops
.
num_classes
,
groups
=
3
,
dropout_factor
=
ops
.
dropout
)
elif
ops
.
model
==
"mobilenetv2"
:
elif
ops
.
model
==
"mobilenetv2"
:
model_
=
MobileNetV2
(
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
model_
=
MobileNetV2
(
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
elif
ops
.
model
==
"ReXNetV1"
:
model_
=
ReXNetV1
(
num_classes
=
ops
.
num_classes
,
dropout_factor
=
ops
.
dropout
)
else
:
else
:
print
(
" no support the model"
)
print
(
" no support the model"
)
...
@@ -164,8 +176,9 @@ if __name__ == "__main__":
...
@@ -164,8 +176,9 @@ if __name__ == "__main__":
help
=
'seed'
)
# 设置随机种子
help
=
'seed'
)
# 设置随机种子
parser
.
add_argument
(
'--model_exp'
,
type
=
str
,
default
=
'./model_exp'
,
parser
.
add_argument
(
'--model_exp'
,
type
=
str
,
default
=
'./model_exp'
,
help
=
'model_exp'
)
# 模型输出文件夹
help
=
'model_exp'
)
# 模型输出文件夹
parser
.
add_argument
(
'--model'
,
type
=
str
,
default
=
'mobilenetv2'
,
parser
.
add_argument
(
'--model'
,
type
=
str
,
default
=
'ReXNetV1'
,
help
=
'model : resnet_34,resnet_50,resnet_101,squeezenet1_0,squeezenet1_1,shufflenetv2,shufflenet,mobilenetv2'
)
# 模型类型
help
=
'''model : resnet_34,resnet_50,resnet_101,squeezenet1_0,squeezenet1_1,shufflenetv2,shufflenet,mobilenetv2
shufflenet_v2_x1_5 ,shufflenet_v2_x1_0 , shufflenet_v2_x2_0,ReXNetV1'''
)
# 模型类型
parser
.
add_argument
(
'--num_classes'
,
type
=
int
,
default
=
42
,
parser
.
add_argument
(
'--num_classes'
,
type
=
int
,
default
=
42
,
help
=
'num_classes'
)
# landmarks 个数*2
help
=
'num_classes'
)
# landmarks 个数*2
parser
.
add_argument
(
'--GPUS'
,
type
=
str
,
default
=
'0'
,
parser
.
add_argument
(
'--GPUS'
,
type
=
str
,
default
=
'0'
,
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录