s920243400 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)
Commit 78064ad9
Authored Jun 27, 2022 by wangjingyeye
Parent: 26a89db7

add db++
Showing 3 changed files with 33 additions and 22 deletions (+33 -22)

ppocr/data/imaug/operators.py           +3  -3
ppocr/modeling/backbones/det_resnet.py  +1  -1
ppocr/modeling/necks/db_fpn.py          +29 -18
ppocr/data/imaug/operators.py

@@ -242,8 +242,8 @@ class DetResizeForTest(object):
         if 'image_shape' in kwargs:
             self.image_shape = kwargs['image_shape']
             self.resize_type = 1
-            if 'keep_ratio' in kwargs:  ######
-                self.keep_ratio = kwargs['keep_ratio']  #######
+            if 'keep_ratio' in kwargs:
+                self.keep_ratio = kwargs['keep_ratio']
         elif 'limit_side_len' in kwargs:
             self.limit_side_len = kwargs['limit_side_len']
             self.limit_type = kwargs.get('limit_type', 'min')

@@ -273,7 +273,7 @@ class DetResizeForTest(object):
     def resize_image_type1(self, img):
         resize_h, resize_w = self.image_shape
         ori_h, ori_w = img.shape[:2]  # (h, w, c)
-        if self.keep_ratio:  ########
+        if self.keep_ratio:
             resize_w = ori_w * resize_h / ori_h
             N = math.ceil(resize_w / 32)
             resize_w = N * 32
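For context, a minimal standalone sketch of the keep_ratio arithmetic touched above: at a fixed target height from image_shape, the width is scaled to preserve the aspect ratio and then rounded up to a multiple of 32. The helper name and the example sizes below are illustrative, not part of the commit.

import math

# Sketch of the keep_ratio branch of DetResizeForTest.resize_image_type1:
# scale the width to the target height, then snap it up to a multiple of 32
# so it stays compatible with the detector's stride.
def resize_shape_keep_ratio(ori_h, ori_w, resize_h):
    resize_w = ori_w * resize_h / ori_h       # preserve the aspect ratio
    resize_w = math.ceil(resize_w / 32) * 32  # round up to a multiple of 32
    return resize_h, int(resize_w)

# A 720x1280 image resized to height 736 gets width 1280 * 736 / 720 = 1308.4...,
# which rounds up to 1312.
print(resize_shape_keep_ratio(720, 1280, 736))  # (736, 1312)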
ppocr/modeling/backbones/det_resnet.py

-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
ppocr/modeling/necks/db_fpn.py

@@ -105,7 +105,7 @@ class DSConv(nn.Layer):
 class DBFPN(nn.Layer):
-    def __init__(self, in_channels, out_channels, use_asf=None, **kwargs):
+    def __init__(self, in_channels, out_channels, use_asf=False, **kwargs):
         super(DBFPN, self).__init__()
         self.out_channels = out_channels
         self.use_asf = use_asf

@@ -164,7 +164,7 @@ class DBFPN(nn.Layer):
             weight_attr=ParamAttr(initializer=weight_attr),
             bias_attr=False)
-        if self.use_asf:
+        if self.use_asf is True:
             self.asf = ASFBlock(self.out_channels, self.out_channels // 4)

     def forward(self, x):

@@ -192,7 +192,7 @@ class DBFPN(nn.Layer):
         fuse = paddle.concat([p5, p4, p3, p2], axis=1)
-        if self.use_asf:
+        if self.use_asf is True:
             fuse = self.asf(fuse, [p5, p4, p3, p2])

         return fuse
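A quick way to exercise the new use_asf switch is a forward pass on dummy backbone features. This is a sketch, assuming paddle is installed and the PaddleOCR repo root is on PYTHONPATH; the channel counts and input resolution are illustrative, not taken from a shipped config.

import paddle
from ppocr.modeling.necks.db_fpn import DBFPN

# Dummy backbone features for a 640x640 input: four stages at strides 4/8/16/32,
# with ResNet-50-style channel counts.
in_channels = [256, 512, 1024, 2048]
feats = [
    paddle.rand([1, 256, 160, 160]),
    paddle.rand([1, 512, 80, 80]),
    paddle.rand([1, 1024, 40, 40]),
    paddle.rand([1, 2048, 20, 20]),
]

# use_asf=True builds the ASFBlock and applies it to the concatenated FPN maps.
neck = DBFPN(in_channels=in_channels, out_channels=256, use_asf=True)
fuse = neck(feats)
print(fuse.shape)  # [1, 256, 160, 160]: out_channels at the stride-4 resolution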
@@ -367,7 +367,19 @@ class LKPAN(nn.Layer):
 class ASFBlock(nn.Layer):
+    """
+    This code is refered from:
+        https://github.com/MhLiao/DB/blob/master/decoders/feature_attention.py
+    """
+
     def __init__(self, in_channels, inter_channels, out_features_num=4):
+        """
+        Adaptive Scale Fusion (ASF) block of DBNet++
+        Args:
+            in_channels: the number of channels in the input data
+            inter_channels: the number of middle channels
+            out_features_num: the number of fused stages
+        """
         super(ASFBlock, self).__init__()
         weight_attr = paddle.nn.initializer.KaimingUniform()
         self.in_channels = in_channels

@@ -375,39 +387,38 @@ class ASFBlock(nn.Layer):
         self.out_features_num = out_features_num
         self.conv = nn.Conv2D(in_channels, inter_channels, 3, padding=1)

-        self.attention_block_1 = nn.Sequential(
+        self.spatial_scale = nn.Sequential(
             #Nx1xHxW
-            nn.Conv2D(1, 1, 3,
+            nn.Conv2D(
+                in_channels=1,
+                out_channels=1,
+                kernel_size=3,
                 bias_attr=False,
                 padding=1,
                 weight_attr=ParamAttr(initializer=weight_attr)),
             nn.ReLU(),
-            nn.Conv2D(1, 1, 1,
+            nn.Conv2D(
+                in_channels=1,
+                out_channels=1,
+                kernel_size=1,
                 bias_attr=False,
                 weight_attr=ParamAttr(initializer=weight_attr)),
             nn.Sigmoid())

-        self.attention_block_2 = nn.Sequential(
+        self.channel_scale = nn.Sequential(
-            nn.Conv2D(inter_channels, out_features_num, 1,
+            nn.Conv2D(
+                in_channels=inter_channels,
+                out_channels=out_features_num,
+                kernel_size=1,
                 bias_attr=False,
                 weight_attr=ParamAttr(initializer=weight_attr)),
             nn.Sigmoid())

     def forward(self, fuse_features, features_list):
         fuse_features = self.conv(fuse_features)
-        attention_scores = self.attention_block_1(
-            paddle.mean(fuse_features, axis=1, keepdim=True)) + fuse_features
-        attention_scores = self.attention_block_2(attention_scores)
+        spatial_x = paddle.mean(fuse_features, axis=1, keepdim=True)
+        attention_scores = self.spatial_scale(spatial_x) + fuse_features
+        attention_scores = self.channel_scale(attention_scores)
         assert len(features_list) == self.out_features_num
         out_list = []
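A shape sketch of the rewritten block in isolation (again assuming paddle and the PaddleOCR repo are importable; sizes are illustrative). fuse_features is the channel-wise concat of the four FPN maps and features_list holds the same four maps individually, each with inter_channels channels.

import paddle
from ppocr.modeling.necks.db_fpn import ASFBlock

inter = 64                                   # out_channels // 4 inside DBFPN
asf = ASFBlock(in_channels=4 * inter, inter_channels=inter)

fuse = paddle.rand([1, 4 * inter, 160, 160])
feats = [paddle.rand([1, inter, 160, 160]) for _ in range(4)]

# spatial_scale turns the channel-mean of the fused map into a spatial gate,
# channel_scale produces one sigmoid score per stage (Nx4xHxW), and each score
# weights its stage before the four weighted maps are concatenated again.
out = asf(fuse, feats)
print(out.shape)  # [1, 256, 160, 160]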