PaddlePaddle / PaddleClas

Commit 03a3da16
Update tnt.py

Authored on Jul 01, 2021 by cuicheng01
Parent: f731ac54
Showing 1 changed file with 18 additions and 18 deletions.
ppcls/arch/backbone/model_zoo/tnt.py
@@ -44,7 +44,7 @@ def drop_path(x, drop_prob=0., training=False):
         return x
     keep_prob = paddle.to_tensor(1 - drop_prob)
     shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
-    random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
+    random_tensor = paddle.add(keep_prob, paddle.rand(shape, dtype=x.dtype))
     random_tensor = paddle.floor(random_tensor)  # binarize
     output = x.divide(keep_prob) * random_tensor
     return output
@@ -114,14 +114,15 @@ class Attention(nn.Layer):
             (2, 0, 3, 1, 4))
         q, k = qk[0], qk[1]
-        v = self.v(x).reshape((B, N, self.num_heads, -1)).transpose(
+        v = self.v(x).reshape((B, N, self.num_heads, x.shape[-1] // self.num_heads)).transpose(
             (0, 2, 1, 3))
-        attn = (q @ k.transpose((0, 1, 3, 2))) * self.scale
+        attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
         attn = nn.functional.softmax(attn, axis=-1)
         attn = self.attn_drop(attn)
-        x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B, N, -1))
+        x = paddle.matmul(attn, v)
+        x = x.transpose((0, 2, 1, 3)).reshape((B, N, x.shape[-1] * x.shape[-3]))
         x = self.proj(x)
         x = self.proj_drop(x)
         return x
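Two patterns recur in this hunk and across the commit: the `@` operator becomes an explicit `paddle.matmul`, and reshape dimensions inferred with `-1` become explicit arithmetic on `x.shape`. A shape walkthrough with hypothetical sizes (B=2, N=16, C=64, num_heads=4; none of these values appear in the commit) shows why `x.shape[-1] * x.shape[-3]` recovers the embedding width C:

import paddle

B, N, C, num_heads = 2, 16, 64, 4
head_dim = C // num_heads  # explicit per-head width, replacing reshape(..., -1)

q = paddle.rand((B, num_heads, N, head_dim))
k = paddle.rand((B, num_heads, N, head_dim))
v = paddle.rand((B, num_heads, N, head_dim))

# paddle.matmul replaces the `q @ k.transpose(...)` operator form.
attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * head_dim**-0.5  # (B, H, N, N)
attn = paddle.nn.functional.softmax(attn, axis=-1)

x = paddle.matmul(attn, v)  # (B, H, N, head_dim)
# Before the transpose, x.shape[-3] is num_heads and x.shape[-1] is head_dim,
# so their product is exactly C; no -1 inference is needed.
out = x.transpose((0, 2, 1, 3)).reshape((B, N, x.shape[-1] * x.shape[-3]))
print(out.shape)  # [2, 16, 64]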
@@ -182,18 +183,18 @@ class Block(nn.Layer):
     def forward(self, pixel_embed, patch_embed):
         # inner
-        pixel_embed = pixel_embed + self.drop_path(
-            self.attn_in(self.norm_in(pixel_embed)))
-        pixel_embed = pixel_embed + self.drop_path(
-            self.mlp_in(self.norm_mlp_in(pixel_embed)))
+        pixel_embed = paddle.add(pixel_embed, self.drop_path(
+            self.attn_in(self.norm_in(pixel_embed))))
+        pixel_embed = paddle.add(pixel_embed, self.drop_path(
+            self.mlp_in(self.norm_mlp_in(pixel_embed))))
         # outer
         B, N, C = patch_embed.shape
-        patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(
-            self.norm1_proj(pixel_embed).reshape((B, N - 1, -1)))
-        patch_embed = patch_embed + self.drop_path(
-            self.attn_out(self.norm_out(patch_embed)))
-        patch_embed = patch_embed + self.drop_path(
-            self.mlp(self.norm_mlp(patch_embed)))
+        patch_embed[:, 1:] = paddle.add(patch_embed[:, 1:], self.proj(
+            self.norm1_proj(pixel_embed).reshape((B, N - 1, -1))))
+        patch_embed = paddle.add(patch_embed, self.drop_path(
+            self.attn_out(self.norm_out(patch_embed))))
+        patch_embed = paddle.add(patch_embed, self.drop_path(
+            self.mlp(self.norm_mlp(patch_embed))))
         return pixel_embed, patch_embed
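All five residual connections in `Block.forward` switch from the `+` operator to `paddle.add`; the result is numerically identical, only the spelling of the op changes. A minimal equivalence check, assuming Paddle 2.x and hypothetical tensor sizes:

import paddle

a = paddle.rand((2, 8, 16))
b = paddle.rand((2, 8, 16))
# paddle.add(a, b) and a + b compute the same elementwise sum.
assert paddle.allclose(paddle.add(a, b), a + b).item()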
@@ -222,10 +223,9 @@ class PixelEmbed(nn.Layer):
         x = self.proj(x)
         x = nn.functional.unfold(x, self.new_patch_size, self.new_patch_size)
         x = x.transpose((0, 2, 1)).reshape(
-            (B * self.num_patches, self.in_dim, self.new_patch_size,
-             self.new_patch_size))
+            (-1, self.in_dim, self.new_patch_size, self.new_patch_size))
         x = x + pixel_pos
-        x = x.reshape((B * self.num_patches, self.in_dim, -1)).transpose(
+        x = x.reshape((-1, self.in_dim, self.new_patch_size * self.new_patch_size)).transpose(
             (0, 2, 1))
         return x
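In `PixelEmbed.forward` the substitution runs the other way: the computed leading dimension `B * self.num_patches` becomes an inferred `-1`, while the previously inferred trailing `-1` becomes the explicit product `new_patch_size * new_patch_size`. A shape sketch with hypothetical sizes (B=2, in_dim=24, a 56x56 feature map giving 196 patches, new_patch_size=4; these values are not from the commit):

import paddle
import paddle.nn as nn

B, in_dim, new_patch_size = 2, 24, 4
x = paddle.rand((B, in_dim, 56, 56))  # 56/4 = 14, so 14*14 = 196 patches

# unfold cuts non-overlapping 4x4 windows: (B, in_dim*4*4, 196)
x = nn.functional.unfold(x, new_patch_size, new_patch_size)
# The leading dim is now inferred: -1 resolves to B * num_patches = 392.
x = x.transpose((0, 2, 1)).reshape(
    (-1, in_dim, new_patch_size, new_patch_size))
print(x.shape)  # [392, 24, 4, 4]
# Flattening back uses the explicit 4*4 product instead of -1.
x = x.reshape((-1, in_dim, new_patch_size * new_patch_size)).transpose(
    (0, 2, 1))
print(x.shape)  # [392, 16, 24]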
@@ -328,7 +328,7 @@ class TNT(nn.Layer):
         patch_embed = self.norm2_proj(
             self.proj(
                 self.norm1_proj(
-                    pixel_embed.reshape((B, self.num_patches, -1)))))
+                    pixel_embed.reshape((-1, self.num_patches, pixel_embed.shape[-1] * pixel_embed.shape[-2])))))
         patch_embed = paddle.concat(
             (self.cls_token.expand((B, -1, -1)), patch_embed), axis=1)
         patch_embed = patch_embed + self.patch_pos
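The last hunk applies the same two substitutions once more: `-1` for the batch dimension, and an explicit product of the two trailing `pixel_embed` dimensions for the width that was previously inferred. A final shape sketch with hypothetical sizes (B=2, 196 patches, 16 pixel positions of width 24; not taken from the commit):

import paddle

B, num_patches, n_pix, in_dim = 2, 196, 16, 24
pixel_embed = paddle.rand((B * num_patches, n_pix, in_dim))

# shape[-1] * shape[-2] == in_dim * n_pix == 384, computed explicitly
# instead of being inferred with -1.
width = pixel_embed.shape[-1] * pixel_embed.shape[-2]
patch_embed = pixel_embed.reshape((-1, num_patches, width))
print(patch_embed.shape)  # [2, 196, 384]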