PaddlePaddle / Paddle-Lite
Commit 8a3fd00b
Authored July 28, 2020 by yongqiang
fix transpose error. test=develop
Parent: 89d573e1
Showing 1 changed file with 166 additions and 63 deletions.

lite/kernels/arm/transpose_compute.cc  +166 -63
lite/kernels/arm/transpose_compute.cc @ 8a3fd00b
...
@@ -25,61 +25,174 @@ namespace lite {
 namespace kernels {
 namespace arm {
-bool IsShuffleChannel(const std::vector<int> &axis) {
-  bool is_shuffle_channel = true;
-  if (axis.size() > 2 && axis[0] == 0 && axis[1] == 2 && axis[2] == 1) {
-    for (int i = 3; i < axis.size(); ++i) {
-      if (axis[i] != i) {
-        is_shuffle_channel = false;
-        break;
-      }
-    }
-  } else {
-    return false;
-  }
-  return is_shuffle_channel;
-}
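For reference, the removed predicate accepts a permutation only when it swaps axes 1 and 2 and leaves axis 0 and every trailing axis in place. A minimal standalone sketch of how it classifies a few axis vectors (the `main` harness is hypothetical, not part of the commit):

```cpp
#include <iostream>
#include <vector>

// Standalone copy of the removed predicate, for illustration only.
bool IsShuffleChannel(const std::vector<int> &axis) {
  if (axis.size() > 2 && axis[0] == 0 && axis[1] == 2 && axis[2] == 1) {
    for (size_t i = 3; i < axis.size(); ++i) {
      if (axis[i] != static_cast<int>(i)) return false;  // tail must stay identity
    }
    return true;
  }
  return false;
}

int main() {
  std::cout << IsShuffleChannel({0, 2, 1, 3});  // 1: channel shuffle on NCHW
  std::cout << IsShuffleChannel({0, 2, 3, 1});  // 0: general permutation
}
```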
+template <typename Dtype>
+void transpose_mat(const Dtype *din,
+                   Dtype *dout,
+                   const int num,
+                   const int width,
+                   const int height);
+
+void transpose_mat(const float *din,
+                   float *dout,
+                   const int num,
+                   const int width,
+                   const int height) {
+  int nw = width >> 2;
+  int nh = height >> 2;
+  int size_in = width * height;
+
+  for (int i = 0; i < num; ++i) {
+    float *ptr_out = dout + i * size_in;
+    const float *ptr_in = din + i * size_in;
+#pragma omp parallel for
+    for (int h = 0; h < nh; h++) {
+      const float *ptr_din_row = ptr_in + h * 4 * width;
+      for (int w = 0; w < nw; w++) {
+        float *data_out_ptr = ptr_out + w * 4 * height + h * 4;
+        const float *din0 = ptr_din_row;
+        const float *din1 = din0 + width;
+        const float *din2 = din1 + width;
+        const float *din3 = din2 + width;
+        float *dout0 = data_out_ptr;
+        float *dout1 = dout0 + height;
+        float *dout2 = dout1 + height;
+        float *dout3 = dout2 + height;
+#ifdef __aarch64__
+        float32x4_t vr0 = vld1q_f32(din0);
+        float32x4_t vr1 = vld1q_f32(din1);
+        float32x4_t vr2 = vld1q_f32(din2);
+        float32x4_t vr3 = vld1q_f32(din3);
+        float32x4_t re0 = vtrn1q_f32(vr0, vr1);
+        float32x4_t re1 = vtrn2q_f32(vr0, vr1);
+        float32x4_t re2 = vtrn1q_f32(vr2, vr3);
+        float32x4_t re3 = vtrn2q_f32(vr2, vr3);
+        vst1_f32(dout0, vget_low_f32(re0));
+        dout0 += 2;
+        vst1_f32(dout0, vget_low_f32(re2));
+        vst1_f32(dout1, vget_low_f32(re1));
+        dout1 += 2;
+        vst1_f32(dout1, vget_low_f32(re3));
+        vst1_f32(dout2, vget_high_f32(re0));
+        dout2 += 2;
+        vst1_f32(dout2, vget_high_f32(re2));
+        vst1_f32(dout3, vget_high_f32(re1));
+        dout3 += 2;
+        vst1_f32(dout3, vget_high_f32(re3));
+#else
+        asm("vld1.32 {d0, d1}, [%[in0]]    \n"
+            "vld1.32 {d2, d3}, [%[in1]]    \n"
+            "vld1.32 {d4, d5}, [%[in2]]    \n"
+            "vld1.32 {d6, d7}, [%[in3]]    \n"
+            "vtrn.32 q0, q1                \n"
+            "vtrn.32 q2, q3                \n"
+            "vswp d1, d4                   \n"
+            "vswp d3, d6                   \n"
+            "vst1.32 {d0, d1}, [%[out0]]   \n"
+            "vst1.32 {d2, d3}, [%[out1]]   \n"
+            "vst1.32 {d4, d5}, [%[out2]]   \n"
+            "vst1.32 {d6, d7}, [%[out3]]   \n"
+            :
+            : [out0] "r"(dout0),
+              [out1] "r"(dout1),
+              [out2] "r"(dout2),
+              [out3] "r"(dout3),
+              [in0] "r"(din0),
+              [in1] "r"(din1),
+              [in2] "r"(din2),
+              [in3] "r"(din3)
+            : "q0", "q1", "q2", "q3");
+#endif
+        ptr_din_row += 4;
+      }
+    }
+    // remain
+    for (int h = 0; h < height; h++) {
+      for (int w = nw * 4; w < width; w++) {
+        const float *data_in_ptr = ptr_in + h * width + w;
+        float *data_out_ptr = ptr_out + w * height + h;
+        *data_out_ptr = *data_in_ptr;
+      }
+    }
+    for (int w = 0; w < width; w++) {
+      for (int h = nh * 4; h < height; h++) {
+        const float *data_in_ptr = ptr_in + h * width + w;
+        float *data_out_ptr = ptr_out + w * height + h;
+        *data_out_ptr = *data_in_ptr;
+      }
+    }
+  }
+}
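The added transpose_mat treats the input as num row-major height x width matrices and transposes each one: full 4x4 tiles go through NEON (vtrn1q_f32/vtrn2q_f32 on AArch64, vtrn.32 plus vswp in the ARMv7 inline assembly), and the right and bottom remainders fall back to the scalar loops above. A plain scalar reference with the same layout contract, useful for cross-checking the kernel (hypothetical helper, not part of the commit):

```cpp
// Scalar reference: dout[w * height + h] = din[h * width + w], per matrix.
void transpose_mat_ref(const float *din, float *dout,
                       int num, int width, int height) {
  for (int i = 0; i < num; ++i) {
    const float *src = din + i * width * height;
    float *dst = dout + i * width * height;
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        dst[w * height + h] = src[h * width + w];
      }
    }
  }
}
```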
-template <typename Dtype>
-void ShuffleChannelCompute(const std::vector<int> &axis,
-                           const lite::Tensor *input,
-                           lite::Tensor *output) {
-  const Dtype *input_ptr = input->data<Dtype>();
-  Dtype *output_ptr = output->mutable_data<Dtype>();
-  // input and output's shape dimension must >= 2 && <= 6.
-  const DDim &in_dim = input->dims();
-  const DDim &out_dim = output->dims();
-  size_t offset = 1;
-  for (int i = 3; i < axis.size(); ++i) {
-    offset *= in_dim[i];
-  }
-#pragma omp parallel for collapse(3)
-  for (int batch = 0; batch < out_dim[0]; ++batch) {
-    for (int c1 = 0; c1 < out_dim[1]; ++c1) {
-      for (int c2 = 0; c2 < out_dim[2]; ++c2) {
-        size_t out_offset =
-            ((batch * out_dim[1] + c1) * out_dim[2] + c2) * offset;
-        size_t in_offset =
-            ((batch * in_dim[1] + c2) * in_dim[2] + c1) * offset;
-        memcpy(output_ptr + out_offset,
-               input_ptr + in_offset,
-               offset * sizeof(Dtype));
-      }
-    }
-  }
-}
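The removed ShuffleChannelCompute implements the {0, 2, 1, ...} permutation by block copies: for every (batch, c1, c2) it memcpy's `offset` contiguous elements (the product of the trailing dims) from input position (batch, c2, c1). A minimal sketch of the same copy pattern on a flattened [N, C1, C2, K] buffer (hypothetical helper, shapes assumed for illustration):

```cpp
#include <cstring>

// out[b][i][j][*] = in[b][j][i][*], copied k floats at a time.
void shuffle_channel_ref(const float *in, float *out,
                         int n, int c1, int c2, int k) {
  for (int b = 0; b < n; ++b)
    for (int i = 0; i < c1; ++i)
      for (int j = 0; j < c2; ++j)
        std::memcpy(out + ((b * c1 + i) * c2 + j) * k,
                    in + ((b * c2 + j) * c1 + i) * k,
                    k * sizeof(float));
}
```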
+std::vector<int> get_stride(const paddle::lite::DDimLite &dims) {
+  std::vector<int> data_stride{0};
+  for (int i = 0; i < dims.size(); ++i) {
+    data_stride.push_back(dims.count(i, dims.size()));
+  }
+  return data_stride;
+}
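get_stride stores, after a leading sentinel 0, the suffix product DDimLite::count(i, size) for each axis i, i.e. the number of elements spanned from axis i to the end; for a shape [2, 3, 4, 5] the result is {0, 120, 60, 20, 5}. An equivalent on a bare shape vector (hypothetical, for illustration):

```cpp
#include <vector>

// Same contract as get_stride, but on a plain shape vector:
// result[0] = 0, result[i + 1] = product of dims[i..end).
std::vector<int> get_stride_ref(const std::vector<int> &dims) {
  std::vector<int> data_stride{0};
  for (size_t i = 0; i < dims.size(); ++i) {
    int count = 1;
    for (size_t j = i; j < dims.size(); ++j) count *= dims[j];
    data_stride.push_back(count);
  }
  return data_stride;
}
// get_stride_ref({2, 3, 4, 5}) == {0, 120, 60, 20, 5}
```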
+void TransposeCompute::PrepareForRun() {
+  auto &param = Param<operators::TransposeParam>();
+  auto *input = param.x;
+  auto *output = param.output;
+
+  int _num_axes = input->dims().size();
+  CHECK(_num_axes == param.axis.size())
+      << "axis size is not match to input dims";
+
+  need_trans = false;
+  for (int i = 0; i < _num_axes; ++i) {
+    if (param.axis[i] != i) {
+      need_trans = true;
+      break;
+    }
+  }
+  if (!need_trans) {
+    return;
+  }
+
+  std::vector<int> axis_diff;
+  int j = 0;
+  for (int i = 0; i < _num_axes; ++i) {
+    if (param.axis[j] != i) {
+      axis_diff.push_back(j);
+    } else {
+      j++;
+    }
+  }
+  for (int i = 0; i < axis_diff.size(); i++) {
+  }
+  if (input->dims().count(axis_diff[0], _num_axes) == 1) {
+    need_trans = false;
+    return;
+  }
+
+  if (axis_diff.size() == 1) {
+    trans_mat = true;
+    _trans_num = input->dims().count(0, std::max(axis_diff[0], 0));
+    _trans_w = input->dims().count(axis_diff[0] + 1, _num_axes);
+    _trans_h = input->dims()[axis_diff[0]];
+  } else {
+    trans_mat = false;
+    _new_steps = get_stride(output->dims());
+    _old_steps = get_stride(input->dims());
+  }
+}
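PrepareForRun decides the dispatch once at setup: an identity permutation means Run only copies; if axis_diff (the positions where the requested order departs from the identity) collapses to a single entry, the whole operation reduces to a batched matrix transpose served by transpose_mat; otherwise the generic strided path gets precomputed input/output strides. A sketch of the axis_diff computation on plain vectors, mirroring the loop above (hypothetical harness):

```cpp
#include <iostream>
#include <vector>

// Walk the identity order i, collecting positions j where axis departs from it.
std::vector<int> axis_diff_of(const std::vector<int> &axis) {
  std::vector<int> axis_diff;
  int j = 0;
  for (int i = 0; i < static_cast<int>(axis.size()); ++i) {
    if (axis[j] != i) {
      axis_diff.push_back(j);
    } else {
      j++;
    }
  }
  return axis_diff;
}

int main() {
  // {0, 2, 3, 1}: only axis 1 moves (to the end) -> axis_diff == {1},
  // so the kernel can take the fast matrix-transpose path.
  for (int d : axis_diff_of({0, 2, 3, 1})) std::cout << d << " ";
}
```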
 template <typename Dtype>
 void TransposeCompute_(const std::vector<int> &axis,
                        const lite::Tensor *input,
                        lite::Tensor *output) {
   // const Dtype *input_ptr = input->data<Dtype>();
   const Dtype *input_ptr = input->data<float>();
   Dtype *output_ptr = output->mutable_data<Dtype>();
   // input and output's shape dimension must >= 2 && <= 6.
   const DDim &in_dim = input->dims();
   const DDim &out_dim = output->dims();
   // precompute inverted output dim and strides
   size_t rout_dim[6], strides[6];
...
@@ -103,7 +216,7 @@ void TransposeCompute_(const std::vector<int> &axis,
   for (int batch = 0; batch < out_dim[0]; ++batch) {
     for (int j = 0; j < out_dim[1]; ++j) {
       size_t offset = batch * strides[permute - 1] + j * strides[permute - 2];
       Dtype *out_ptr = output_ptr + (batch * out_dim[1] + j) * reamin_dim;
       int indics[4] = {0, 0, 0, 0};
       for (int k = 0; k < reamin_dim; ++k) {
         out_ptr[k] = input_ptr[offset];
...
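The visible part of TransposeCompute_ gathers out_ptr[k] = input_ptr[offset] through precomputed strides; the collapsed regions hide how offset and indics advance per element. A generic, unoptimized version of the same gather-by-strides idea (hypothetical sketch, not the commit's exact loop):

```cpp
#include <vector>

// Generic permutation: out[o] = in[src], where src is rebuilt from the
// multi-index of o under the output shape, routed through `axis`.
void transpose_generic(const float *in, float *out,
                       const std::vector<int> &in_dim,
                       const std::vector<int> &axis) {
  int rank = static_cast<int>(in_dim.size());
  std::vector<int> out_dim(rank), in_stride(rank, 1), out_stride(rank, 1);
  for (int i = 0; i < rank; ++i) out_dim[i] = in_dim[axis[i]];
  for (int i = rank - 2; i >= 0; --i) {
    in_stride[i] = in_stride[i + 1] * in_dim[i + 1];
    out_stride[i] = out_stride[i + 1] * out_dim[i + 1];
  }
  int total = out_stride[0] * out_dim[0];
  for (int o = 0; o < total; ++o) {
    int rem = o, src = 0;
    for (int i = 0; i < rank; ++i) {
      int idx = rem / out_stride[i];    // multi-index along output axis i
      rem %= out_stride[i];
      src += idx * in_stride[axis[i]];  // same index along the mapped input axis
    }
    out[o] = in[src];
  }
}
```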
@@ -123,37 +236,27 @@ void TransposeCompute_(const std::vector<int> &axis,
       }
     }
   }
 }

 // Transpose
 void TransposeCompute::Run() {
   auto &param = Param<operators::TransposeParam>();
   auto *input = param.x;
   auto *output = param.output;
   const std::vector<int> axis = param.axis;
-  bool shuffle_channel = IsShuffleChannel(axis);
-  if (shuffle_channel) {
-    ShuffleChannelCompute<float>(axis, input, output);
-  } else {
-    TransposeCompute_<float>(axis, input, output);
-  }
-  return;
-}
-
-// Transpose2
-void Transpose2Compute::Run() {
-  auto &param = Param<operators::TransposeParam>();
-  auto *input = param.x;
-  auto *output = param.output;
-  const std::vector<int> axis = param.axis;
-  bool shuffle_channel = IsShuffleChannel(axis);
-  if (shuffle_channel) {
-    ShuffleChannelCompute<float>(axis, input, output);
-  } else {
-    TransposeCompute_<float>(axis, input, output);
-  }
+  //! only copy the data
+  if (!need_trans) {
+    output->CopyDataFrom(*input);
+    return;
+  }
+  const float *din = static_cast<const float *>(input->data<float>());
+  float *dout = static_cast<float *>(output->mutable_data<float>());
+  //! transpose the data
+  if (trans_mat) {
+    transpose_mat(din, dout, _trans_num, _trans_w, _trans_h);
+  } else {
+    TransposeCompute_<float>(axis, input, output);
+  }
   return;
 }
 }  // namespace arm
...