Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
8735c538
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
332
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
8735c538
编写于
7月 05, 2018
作者:
I
itminner
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix code style; reset build.sh change
上级
0d3490e4
变更
21
隐藏空白更改
内联
并排
Showing
21 changed file
with
695 addition
and
733 deletion
+695
-733
src/operators/kernel/arm/prelu_kernel.cpp
src/operators/kernel/arm/prelu_kernel.cpp
+79
-85
src/operators/kernel/arm/resize_kernel.cpp
src/operators/kernel/arm/resize_kernel.cpp
+102
-105
src/operators/kernel/arm/scale_kernel.cpp
src/operators/kernel/arm/scale_kernel.cpp
+119
-124
src/operators/kernel/arm/slice_kernel.cpp
src/operators/kernel/arm/slice_kernel.cpp
+2
-3
src/operators/kernel/prelu_kernel.h
src/operators/kernel/prelu_kernel.h
+7
-7
src/operators/kernel/resize_kernel.h
src/operators/kernel/resize_kernel.h
+52
-54
src/operators/kernel/scale_kernel.h
src/operators/kernel/scale_kernel.h
+8
-8
src/operators/kernel/slice_kernel.h
src/operators/kernel/slice_kernel.h
+7
-7
src/operators/op_param.h
src/operators/op_param.h
+111
-112
src/operators/prelu_op.cpp
src/operators/prelu_op.cpp
+9
-9
src/operators/prelu_op.h
src/operators/prelu_op.h
+25
-26
src/operators/resize_op.cpp
src/operators/resize_op.cpp
+9
-9
src/operators/resize_op.h
src/operators/resize_op.h
+24
-24
src/operators/scale_op.cpp
src/operators/scale_op.cpp
+9
-9
src/operators/scale_op.h
src/operators/scale_op.h
+25
-26
src/operators/slice_op.cpp
src/operators/slice_op.cpp
+8
-9
src/operators/slice_op.h
src/operators/slice_op.h
+25
-26
test/operators/test_prelu_op.cpp
test/operators/test_prelu_op.cpp
+29
-29
test/operators/test_resize_op.cpp
test/operators/test_resize_op.cpp
+28
-28
test/operators/test_slice_op.cpp
test/operators/test_slice_op.cpp
+1
-1
tools/build.sh
tools/build.sh
+16
-32
未找到文件。
src/operators/kernel/arm/prelu_kernel.cpp
浏览文件 @
8735c538
...
...
@@ -18,102 +18,96 @@ limitations under the License. */
#include <operators/math/transform.h>
namespace
paddle_mobile
{
namespace
operators
{
namespace
operators
{
template
<
typename
T
>
struct
PReluFunctor
{
explicit
PReluFunctor
(
float
slope
)
{
this
->
slope_
=
slope
;
}
inline
T
operator
()(
T
in
)
const
{
return
in
>
0
?
in
:
in
*
slope_
;
}
template
<
typename
T
>
struct
PReluFunctor
{
explicit
PReluFunctor
(
float
slope
)
{
this
->
slope_
=
slope
;
}
inline
T
operator
()(
T
in
)
const
{
return
in
>
0
?
in
:
in
*
slope_
;
}
float
slope_
=
0.0
f
;
};
float
slope_
=
0.0
f
;
};
/*
* @b 特化到具体平台的实现, param 从 op 层传入
* */
template
<
>
void
PReluKernel
<
CPU
,
float
>::
Compute
(
const
PReluParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
auto
*
input_x_ptr
=
input_x
->
data
<
float
>
();
auto
*
out
=
param
.
Out
();
auto
*
out_ptr
=
out
->
mutable_data
<
float
>
();
template
<
>
void
PReluKernel
<
CPU
,
float
>::
Compute
(
const
PReluParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
auto
*
input_x_ptr
=
input_x
->
data
<
float
>
();
auto
*
out
=
param
.
Out
();
auto
*
out_ptr
=
out
->
mutable_data
<
float
>
();
if
(
param
.
Slopes
().
size
()
==
1
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
0
]);
math
::
Transform
trans
;
trans
(
input_x_ptr
,
input_x_ptr
+
input_x
->
numel
(),
out_ptr
,
func_
);
}
else
if
(
param
.
Slopes
().
size
()
>
1
)
{
const
int
dim_size
=
input_x
->
dims
().
size
();
switch
(
dim_size
)
{
case
0
:
break
;
case
1
:
{
const
int
input_width
=
input_x
->
dims
()[
0
];
math
::
Transform
trans
;
if
(
param
.
Slopes
().
size
()
==
1
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
0
]);
math
::
Transform
trans
;
trans
(
input_x_ptr
,
input_x_ptr
+
input_x
->
numel
(),
out_ptr
,
func_
);
}
else
if
(
param
.
Slopes
().
size
()
>
1
)
{
const
int
dim_size
=
input_x
->
dims
().
size
();
switch
(
dim_size
)
{
case
0
:
break
;
case
1
:
{
const
int
input_width
=
input_x
->
dims
()[
0
];
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
param
.
Slopes
()[
w
];
}
}
break
;
case
2
:
{
const
int
input_height
=
input_x
->
dims
()[
0
];
const
int
input_width
=
input_x
->
dims
()[
1
];
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
param
.
Slopes
()[
w
];
}
}
break
;
case
2
:
{
const
int
input_height
=
input_x
->
dims
()[
0
];
const
int
input_width
=
input_x
->
dims
()[
1
];
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
h
]);
const
float
*
ptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
+
h
*
input_width
;
trans
(
ptr
,
ptr
+
input_width
,
optr
,
func_
);
}
}
break
;
case
3
:
{
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
h
]);
const
float
*
ptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
+
h
*
input_width
;
trans
(
ptr
,
ptr
+
input_width
,
optr
,
func_
);
}
}
break
;
case
3
:
{
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
c
]);
int
size
=
input_height
*
input_width
;
const
float
*
ptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
trans
(
ptr
,
ptr
+
size
,
optr
,
func_
);
}
}
break
;
case
4
:
default:
{
const
int
batch_size
=
input_x
->
dims
()[
0
];
const
int
chan_size
=
input_x
->
dims
()[
1
];
const
int
input_height
=
input_x
->
dims
()[
2
];
const
int
input_width
=
input_x
->
dims
()[
3
];
math
::
Transform
trans
;
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
c
]);
int
size
=
input_height
*
input_width
;
const
float
*
ptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
trans
(
ptr
,
ptr
+
size
,
optr
,
func_
);
}
}
break
;
case
4
:
default:
{
const
int
batch_size
=
input_x
->
dims
()[
0
];
const
int
chan_size
=
input_x
->
dims
()[
1
];
const
int
input_height
=
input_x
->
dims
()[
2
];
const
int
input_width
=
input_x
->
dims
()[
3
];
math
::
Transform
trans
;
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
c
]);
int
size
=
input_height
*
input_width
;
const
float
*
ptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
+
b
*
c
*
size
;
trans
(
ptr
,
ptr
+
size
,
optr
,
func_
);
}
}
}
// case 3,default
break
;
}
}
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
PReluFunctor
<
float
>
func_
(
param
.
Slopes
()[
c
]);
int
size
=
input_height
*
input_width
;
const
float
*
ptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
+
b
*
c
*
size
;
trans
(
ptr
,
ptr
+
size
,
optr
,
func_
);
}
}
}
// namespace operators
}
// case 3,default
break
;
}
}
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
\ No newline at end of file
#endif
src/operators/kernel/arm/resize_kernel.cpp
浏览文件 @
8735c538
...
...
@@ -14,114 +14,111 @@ limitations under the License. */
#ifdef RESIZE_OP
#include <cmath>
#include "operators/kernel/resize_kernel.h"
#include <cmath>
namespace
paddle_mobile
{
namespace
operators
{
void
BiLinearResizeTensor
(
const
float
*
src
,
const
int
src_height
,
const
int
src_width
,
float
*
dst
,
const
int
dst_height
,
const
int
dst_width
)
{
const
float
scale_w
=
src_width
/
(
float
)
dst_width
;
const
float
scale_h
=
src_height
/
(
float
)
dst_height
;
float
*
dst_data
=
dst
;
const
float
*
src_data
=
src
;
for
(
int
dst_h
=
0
;
dst_h
<
dst_height
;
++
dst_h
)
{
float
fh
=
dst_h
*
scale_h
;
int
src_h
=
std
::
floor
(
fh
);
fh
-=
src_h
;
const
float
w_h0
=
std
::
abs
((
float
)
1.0
-
fh
);
const
float
w_h1
=
std
::
abs
(
fh
);
const
int
dst_offset_1
=
dst_h
*
dst_width
;
const
int
src_offset_1
=
src_h
*
src_width
;
float
*
dst_data_ptr
=
dst_data
+
dst_offset_1
;
for
(
int
dst_w
=
0
;
dst_w
<
dst_width
;
++
dst_w
)
{
float
fw
=
dst_w
*
scale_w
;
int
src_w
=
std
::
floor
(
fw
);
fw
-=
src_w
;
const
float
w_w0
=
std
::
abs
((
float
)
1.0
-
fw
);
const
float
w_w1
=
std
::
abs
(
fw
);
float
dst_value
=
0
;
const
int
src_idx
=
src_offset_1
+
src_w
;
dst_value
+=
(
w_h0
*
w_w0
*
src_data
[
src_idx
]);
int
flag
=
0
;
if
(
src_w
+
1
<
src_width
){
dst_value
+=
(
w_h0
*
w_w1
*
src_data
[
src_idx
+
1
]);
++
flag
;
}
if
(
src_h
+
1
<
src_height
){
dst_value
+=
(
w_h1
*
w_w0
*
src_data
[
src_idx
+
src_width
]);
++
flag
;
}
if
(
flag
>
1
){
dst_value
+=
(
w_h1
*
w_w1
*
src_data
[
src_idx
+
src_width
+
1
]);
// ++flag;
}
*
(
dst_data_ptr
++
)
=
dst_value
;
}
}
}
void
ResizeTensor
(
const
Tensor
*
src
,
const
int
src_n
,
const
int
src_c
,
Tensor
*
dst
,
const
int
dst_n
,
const
int
dst_c
)
{
framework
::
DDim
in_dims
=
src
->
dims
();
const
int
src_chans
=
in_dims
[
1
];
const
int
src_height
=
in_dims
[
2
];
const
int
src_width
=
in_dims
[
3
];
const
int
src_offset
=
(
src_n
*
src_chans
+
src_c
)
*
src_height
*
src_width
;
framework
::
DDim
out_dims
=
dst
->
dims
();
const
int
dst_chans
=
out_dims
[
1
];
const
int
dst_height
=
out_dims
[
2
];
const
int
dst_width
=
out_dims
[
3
];
const
int
dst_offset
=
(
dst_n
*
dst_chans
+
dst_c
)
*
dst_height
*
dst_width
;
const
auto
*
src_ptr
=
src
->
data
<
float
>
();
auto
*
dst_ptr
=
dst
->
data
<
float
>
();
const
auto
*
src_data
=
&
(
src_ptr
[
src_offset
]);
auto
*
dst_data
=
&
(
dst_ptr
[
dst_offset
]);
BiLinearResizeTensor
(
src_data
,
src_height
,
src_width
,
dst_data
,
dst_height
,
dst_width
);
}
void
ResizeTensor
(
const
Tensor
*
src
,
Tensor
*
dst
)
{
framework
::
DDim
in_dims
=
src
->
dims
();
framework
::
DDim
out_dims
=
dst
->
dims
();
PADDLE_MOBILE_ENFORCE
(
in_dims
[
0
]
==
out_dims
[
0
],
"src tensor batch num not equal to dst tensor"
);
PADDLE_MOBILE_ENFORCE
(
in_dims
[
1
]
==
out_dims
[
1
],
"src tensor channel num not equal to dst tensor"
);
for
(
int
n
=
0
,
batch_num
=
in_dims
[
0
];
n
<
batch_num
;
++
n
)
{
for
(
int
c
=
0
,
chan_num
=
in_dims
[
1
];
c
<
chan_num
;
++
c
)
{
ResizeTensor
(
src
,
n
,
c
,
dst
,
n
,
c
);
}
}
}
template
<
>
void
ResizeKernel
<
CPU
,
float
>::
Compute
(
const
ResizeParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
const
auto
&
input_x_dims
=
input_x
->
dims
();
auto
*
out
=
param
.
Out
();
framework
::
DDim
out_dims
=
CalOutputShape
(
param
);
out
->
Resize
(
out_dims
);
ResizeTensor
(
input_x
,
out
);
}
}
// namespace operators
namespace
operators
{
void
BiLinearResizeTensor
(
const
float
*
src
,
const
int
src_height
,
const
int
src_width
,
float
*
dst
,
const
int
dst_height
,
const
int
dst_width
)
{
const
float
scale_w
=
src_width
/
(
float
)
dst_width
;
const
float
scale_h
=
src_height
/
(
float
)
dst_height
;
float
*
dst_data
=
dst
;
const
float
*
src_data
=
src
;
for
(
int
dst_h
=
0
;
dst_h
<
dst_height
;
++
dst_h
)
{
float
fh
=
dst_h
*
scale_h
;
int
src_h
=
std
::
floor
(
fh
);
fh
-=
src_h
;
const
float
w_h0
=
std
::
abs
((
float
)
1.0
-
fh
);
const
float
w_h1
=
std
::
abs
(
fh
);
const
int
dst_offset_1
=
dst_h
*
dst_width
;
const
int
src_offset_1
=
src_h
*
src_width
;
float
*
dst_data_ptr
=
dst_data
+
dst_offset_1
;
for
(
int
dst_w
=
0
;
dst_w
<
dst_width
;
++
dst_w
)
{
float
fw
=
dst_w
*
scale_w
;
int
src_w
=
std
::
floor
(
fw
);
fw
-=
src_w
;
const
float
w_w0
=
std
::
abs
((
float
)
1.0
-
fw
);
const
float
w_w1
=
std
::
abs
(
fw
);
float
dst_value
=
0
;
const
int
src_idx
=
src_offset_1
+
src_w
;
dst_value
+=
(
w_h0
*
w_w0
*
src_data
[
src_idx
]);
int
flag
=
0
;
if
(
src_w
+
1
<
src_width
)
{
dst_value
+=
(
w_h0
*
w_w1
*
src_data
[
src_idx
+
1
]);
++
flag
;
}
if
(
src_h
+
1
<
src_height
)
{
dst_value
+=
(
w_h1
*
w_w0
*
src_data
[
src_idx
+
src_width
]);
++
flag
;
}
if
(
flag
>
1
)
{
dst_value
+=
(
w_h1
*
w_w1
*
src_data
[
src_idx
+
src_width
+
1
]);
// ++flag;
}
*
(
dst_data_ptr
++
)
=
dst_value
;
}
}
}
void
ResizeTensor
(
const
Tensor
*
src
,
const
int
src_n
,
const
int
src_c
,
Tensor
*
dst
,
const
int
dst_n
,
const
int
dst_c
)
{
framework
::
DDim
in_dims
=
src
->
dims
();
const
int
src_chans
=
in_dims
[
1
];
const
int
src_height
=
in_dims
[
2
];
const
int
src_width
=
in_dims
[
3
];
const
int
src_offset
=
(
src_n
*
src_chans
+
src_c
)
*
src_height
*
src_width
;
framework
::
DDim
out_dims
=
dst
->
dims
();
const
int
dst_chans
=
out_dims
[
1
];
const
int
dst_height
=
out_dims
[
2
];
const
int
dst_width
=
out_dims
[
3
];
const
int
dst_offset
=
(
dst_n
*
dst_chans
+
dst_c
)
*
dst_height
*
dst_width
;
const
auto
*
src_ptr
=
src
->
data
<
float
>
();
auto
*
dst_ptr
=
dst
->
data
<
float
>
();
const
auto
*
src_data
=
&
(
src_ptr
[
src_offset
]);
auto
*
dst_data
=
&
(
dst_ptr
[
dst_offset
]);
BiLinearResizeTensor
(
src_data
,
src_height
,
src_width
,
dst_data
,
dst_height
,
dst_width
);
}
void
ResizeTensor
(
const
Tensor
*
src
,
Tensor
*
dst
)
{
framework
::
DDim
in_dims
=
src
->
dims
();
framework
::
DDim
out_dims
=
dst
->
dims
();
PADDLE_MOBILE_ENFORCE
(
in_dims
[
0
]
==
out_dims
[
0
],
"src tensor batch num not equal to dst tensor"
);
PADDLE_MOBILE_ENFORCE
(
in_dims
[
1
]
==
out_dims
[
1
],
"src tensor channel num not equal to dst tensor"
);
for
(
int
n
=
0
,
batch_num
=
in_dims
[
0
];
n
<
batch_num
;
++
n
)
{
for
(
int
c
=
0
,
chan_num
=
in_dims
[
1
];
c
<
chan_num
;
++
c
)
{
ResizeTensor
(
src
,
n
,
c
,
dst
,
n
,
c
);
}
}
}
template
<
>
void
ResizeKernel
<
CPU
,
float
>::
Compute
(
const
ResizeParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
const
auto
&
input_x_dims
=
input_x
->
dims
();
auto
*
out
=
param
.
Out
();
framework
::
DDim
out_dims
=
CalOutputShape
(
param
);
out
->
Resize
(
out_dims
);
ResizeTensor
(
input_x
,
out
);
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/arm/scale_kernel.cpp
浏览文件 @
8735c538
...
...
@@ -17,135 +17,130 @@ limitations under the License. */
#include "operators/kernel/scale_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
namespace
operators
{
/*
* @b 特化到具体平台的实现, param 从 op 层传入
* */
template
<
>
void
ScaleKernel
<
CPU
,
float
>::
Compute
(
const
ScaleParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
auto
*
input_x_ptr
=
input_x
->
data
<
float
>
();
auto
*
out
=
param
.
Out
();
auto
*
out_ptr
=
out
->
mutable_data
<
float
>
();
const
vector
<
float
>
scales
=
param
.
Scales
();
bool
has_bias
=
param
.
HasBias
();
const
int
dim_size
=
input_x
->
dims
().
size
();
switch
(
dim_size
){
case
1
:
{
const
int
input_width
=
input_x
->
dims
()[
0
];
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
w
++
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
scales
[
w
]
+
biases
[
w
];
}
}
else
{
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
w
++
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
scales
[
w
];
}
}
}
break
;
case
2
:
{
const
int
input_height
=
input_x
->
dims
()[
0
];
const
int
input_width
=
input_x
->
dims
()[
1
];
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
const
float
*
iptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
h
*
input_width
;
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
optr
[
w
]
=
iptr
[
w
]
*
scales
[
w
]
+
biases
[
w
];
}
}
}
else
{
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
const
float
*
iptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
h
*
input_width
;
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
optr
[
w
]
=
iptr
[
w
]
*
scales
[
w
];
}
}
}
}
break
;
case
3
:
{
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
int
size
=
input_width
*
input_height
;
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
]
+
biases
[
c
];
}
}
}
else
{
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
];
}
}
}
}
break
;
case
4
:
{
const
int
batch_size
=
input_x
->
dims
()[
0
];
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
int
size
=
input_width
*
input_height
;
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
b
*
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
]
+
biases
[
c
];
}
}
}
}
else
{
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
b
*
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
];
}
}
}
}
}
break
;
default:
break
;
template
<
>
void
ScaleKernel
<
CPU
,
float
>::
Compute
(
const
ScaleParam
&
param
)
const
{
const
auto
*
input_x
=
param
.
InputX
();
auto
*
input_x_ptr
=
input_x
->
data
<
float
>
();
auto
*
out
=
param
.
Out
();
auto
*
out_ptr
=
out
->
mutable_data
<
float
>
();
const
vector
<
float
>
scales
=
param
.
Scales
();
bool
has_bias
=
param
.
HasBias
();
const
int
dim_size
=
input_x
->
dims
().
size
();
switch
(
dim_size
)
{
case
1
:
{
const
int
input_width
=
input_x
->
dims
()[
0
];
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
w
++
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
scales
[
w
]
+
biases
[
w
];
}
}
else
{
#pragma omp parallel for
for
(
int
w
=
0
;
w
<
input_width
;
w
++
)
{
out_ptr
[
w
]
=
input_x_ptr
[
w
]
*
scales
[
w
];
}
}
}
break
;
case
2
:
{
const
int
input_height
=
input_x
->
dims
()[
0
];
const
int
input_width
=
input_x
->
dims
()[
1
];
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
const
float
*
iptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
h
*
input_width
;
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
optr
[
w
]
=
iptr
[
w
]
*
scales
[
w
]
+
biases
[
w
];
}
}
}
else
{
#pragma omp parallel for
for
(
int
h
=
0
;
h
<
input_height
;
++
h
)
{
const
float
*
iptr
=
input_x_ptr
+
h
*
input_width
;
float
*
optr
=
out_ptr
+
h
*
input_width
;
for
(
int
w
=
0
;
w
<
input_width
;
++
w
)
{
optr
[
w
]
=
iptr
[
w
]
*
scales
[
w
];
}
}
}
}
break
;
case
3
:
{
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
int
size
=
input_width
*
input_height
;
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
]
+
biases
[
c
];
}
}
}
else
{
#pragma omp parallel for
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
c
*
size
;
float
*
optr
=
out_ptr
+
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
];
}
}
}
}
break
;
case
4
:
{
const
int
batch_size
=
input_x
->
dims
()[
0
];
const
int
chan_size
=
input_x
->
dims
()[
0
];
const
int
input_height
=
input_x
->
dims
()[
1
];
const
int
input_width
=
input_x
->
dims
()[
2
];
int
size
=
input_width
*
input_height
;
if
(
has_bias
)
{
const
vector
<
float
>
biases
=
param
.
Biases
();
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
b
*
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
]
+
biases
[
c
];
}
}
}
}
else
{
#pragma omp parallel for
for
(
int
b
=
0
;
b
<
batch_size
;
++
b
)
{
for
(
int
c
=
0
;
c
<
chan_size
;
++
c
)
{
const
float
*
iptr
=
input_x_ptr
+
b
*
c
*
size
;
float
*
optr
=
out_ptr
+
b
*
c
*
size
;
for
(
int
i
=
0
;
i
<
size
;
++
i
)
{
optr
[
i
]
=
iptr
[
i
]
*
scales
[
c
];
}
}
}
}
// namespace operators
}
}
break
;
default:
break
;
}
}
}
// namespace operators
}
// namespace paddle_mobile
#endif
\ No newline at end of file
#endif
src/operators/kernel/arm/slice_kernel.cpp
浏览文件 @
8735c538
...
...
@@ -17,7 +17,6 @@ limitations under the License. */
#include "operators/kernel/slice_kernel.h"
namespace
paddle_mobile
{
namespace
operators
{
}
}
namespace
operators
{}
}
// namespace paddle_mobile
#endif
src/operators/kernel/prelu_kernel.h
浏览文件 @
8735c538
...
...
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace
paddle_mobile
{
namespace
operators
{
namespace
operators
{
template
<
typename
DeviceType
,
typename
T
>
class
PReluKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
PReluParam
>
{
public:
void
Compute
(
const
PReluParam
&
param
)
const
;
};
}
// namespace operators
template
<
typename
DeviceType
,
typename
T
>
class
PReluKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
PReluParam
>
{
public:
void
Compute
(
const
PReluParam
&
param
)
const
;
};
}
// namespace operators
}
// namespace paddle_mobile
src/operators/kernel/resize_kernel.h
浏览文件 @
8735c538
...
...
@@ -22,60 +22,58 @@ limitations under the License. */
#include "operators/op_param.h"
namespace
paddle_mobile
{
namespace
operators
{
inline
framework
::
DDim
CalOutputShape
(
const
ResizeParam
&
param
)
{
const
auto
*
input_x
=
param
.
InputX
();
const
auto
&
input_x_dims
=
input_x
->
dims
();
auto
*
out
=
param
.
Out
();
framework
::
DDim
out_dims
=
out
->
dims
();
const
auto
*
input_shape
=
param
.
InputShape
();
if
(
input_shape
)
{
auto
*
shape_data
=
input_shape
->
data
<
int
>
();
framework
::
Tensor
cpu_shape_tensor
;
auto
shape
=
std
::
vector
<
int
>
(
shape_data
,
shape_data
+
input_shape
->
numel
());
const
int
in_batch_size
=
input_x
->
dims
()[
0
];
const
int
in_chan_size
=
input_x
->
dims
()[
1
];
const
int
in_height
=
input_x
->
dims
()[
2
];
const
int
in_width
=
input_x
->
dims
()[
3
];
int
out_height
=
0
;
int
out_width
=
0
;
bool
is_pyramid_test
=
param
.
IsPyramidTest
();
if
(
is_pyramid_test
==
false
)
{
out_height
=
param
.
Height
();
out_width
=
param
.
Width
();
PADDLE_MOBILE_ENFORCE
(
out_height
>
0
,
"output height is required"
);
PADDLE_MOBILE_ENFORCE
(
out_width
>
0
,
"output width is required"
);
}
else
{
float
out_height_scale
=
param
.
OutHeightScale
();
float
out_width_scale
=
param
.
OutWidthScale
();
PADDLE_MOBILE_ENFORCE
(
out_height_scale
>
0
,
"output height scale is required"
);
PADDLE_MOBILE_ENFORCE
(
out_width_scale
>
0
,
"output width scale is required"
);
out_height
=
int
(
out_height_scale
*
in_height
);
out_width
=
int
(
out_width_scale
*
in_width
);
}
out_dims
=
framework
::
make_ddim
(
{
in_batch_size
,
in_chan_size
,
in_height
,
in_width
}
);
}
return
out_dims
;
}
template
<
typename
DeviceType
,
typename
T
>
class
ResizeKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
ResizeParam
>
{
public:
void
Compute
(
const
ResizeParam
&
param
)
const
;
};
}
// namespace operators
namespace
operators
{
inline
framework
::
DDim
CalOutputShape
(
const
ResizeParam
&
param
)
{
const
auto
*
input_x
=
param
.
InputX
();
const
auto
&
input_x_dims
=
input_x
->
dims
();
auto
*
out
=
param
.
Out
();
framework
::
DDim
out_dims
=
out
->
dims
();
const
auto
*
input_shape
=
param
.
InputShape
();
if
(
input_shape
)
{
auto
*
shape_data
=
input_shape
->
data
<
int
>
();
framework
::
Tensor
cpu_shape_tensor
;
auto
shape
=
std
::
vector
<
int
>
(
shape_data
,
shape_data
+
input_shape
->
numel
());
const
int
in_batch_size
=
input_x
->
dims
()[
0
];
const
int
in_chan_size
=
input_x
->
dims
()[
1
];
const
int
in_height
=
input_x
->
dims
()[
2
];
const
int
in_width
=
input_x
->
dims
()[
3
];
int
out_height
=
0
;
int
out_width
=
0
;
bool
is_pyramid_test
=
param
.
IsPyramidTest
();
if
(
is_pyramid_test
==
false
)
{
out_height
=
param
.
Height
();
out_width
=
param
.
Width
();
PADDLE_MOBILE_ENFORCE
(
out_height
>
0
,
"output height is required"
);
PADDLE_MOBILE_ENFORCE
(
out_width
>
0
,
"output width is required"
);
}
else
{
float
out_height_scale
=
param
.
OutHeightScale
();
float
out_width_scale
=
param
.
OutWidthScale
();
PADDLE_MOBILE_ENFORCE
(
out_height_scale
>
0
,
"output height scale is required"
);
PADDLE_MOBILE_ENFORCE
(
out_width_scale
>
0
,
"output width scale is required"
);
out_height
=
int
(
out_height_scale
*
in_height
);
out_width
=
int
(
out_width_scale
*
in_width
);
}
out_dims
=
framework
::
make_ddim
(
{
in_batch_size
,
in_chan_size
,
in_height
,
in_width
});
}
return
out_dims
;
}
template
<
typename
DeviceType
,
typename
T
>
class
ResizeKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
ResizeParam
>
{
public:
void
Compute
(
const
ResizeParam
&
param
)
const
;
};
}
// namespace operators
}
// namespace paddle_mobile
#endif
src/operators/kernel/scale_kernel.h
浏览文件 @
8735c538
...
...
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace
paddle_mobile
{
namespace
operators
{
namespace
operators
{
template
<
typename
DeviceType
,
typename
T
>
class
ScaleKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
ScaleParam
>
{
public:
void
Compute
(
const
ScaleParam
&
param
)
const
;
};
}
// namespace operators
}
// namespace paddle_mobile
\ No newline at end of file
template
<
typename
DeviceType
,
typename
T
>
class
ScaleKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
ScaleParam
>
{
public:
void
Compute
(
const
ScaleParam
&
param
)
const
;
};
}
// namespace operators
}
// namespace paddle_mobile
src/operators/kernel/slice_kernel.h
浏览文件 @
8735c538
...
...
@@ -18,12 +18,12 @@ limitations under the License. */
#pragma once;
namespace
paddle_mobile
{
namespace
operators
{
namespace
operators
{
template
<
typename
DeviceType
,
typename
T
>
class
SliceKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
SliceParam
>
{
public:
void
Compute
(
const
SliceParam
&
param
)
const
{}
};
}
// namespace operators
template
<
typename
DeviceType
,
typename
T
>
class
SliceKernel
:
public
framework
::
OpKernelBase
<
DeviceType
,
SliceParam
>
{
public:
void
Compute
(
const
SliceParam
&
param
)
const
{}
};
}
// namespace operators
}
// namespace paddle_mobile
src/operators/op_param.h
浏览文件 @
8735c538
...
...
@@ -730,123 +730,122 @@ class ReshapeParam : public OpParam {
#endif
#ifdef SCALE_OP
class
ScaleParam
:
public
OpParam
{
public:
ScaleParam
(
const
VariableNameMap
&
inputs
,
const
VariableNameMap
&
outputs
,
const
AttributeMap
&
attrs
,
const
Scope
&
scope
)
{
input_x_
=
InputXFrom
<
LoDTensor
>
(
inputs
,
scope
);
input_bias_
=
InputBiasFrom
<
framework
::
LoDTensor
>
(
inputs
,
scope
);
out_
=
OutFrom
<
LoDTensor
>
(
outputs
,
scope
);
inplace_
=
GetAttr
<
bool
>
(
"inplace"
,
attrs
);
has_bias_
=
GetAttr
<
bool
>
(
"has_bias"
,
attrs
);
scales_
=
GetAttr
<
vector
<
float
>>
(
"scales"
,
attrs
);
biases_
=
GetAttr
<
vector
<
float
>>
(
"biases"
,
attrs
);
}
const
Tensor
*
InputX
()
const
{
return
input_x_
;
}
const
Tensor
*
InputBias
()
const
{
return
input_bias_
;
}
Tensor
*
Out
()
const
{
return
out_
;
}
const
bool
&
Inplace
()
const
{
return
inplace_
;
}
const
bool
&
HasBias
()
const
{
return
has_bias_
;
}
const
vector
<
float
>
&
Scales
()
const
{
return
scales_
;
}
const
vector
<
float
>
&
Biases
()
const
{
return
biases_
;
}
private:
Tensor
*
input_x_
;
Tensor
*
input_bias_
;
Tensor
*
out_
;
bool
inplace_
;
bool
has_bias_
;
vector
<
float
>
scales_
;
vector
<
float
>
biases_
;
};
class
ScaleParam
:
public
OpParam
{
public:
ScaleParam
(
const
VariableNameMap
&
inputs
,
const
VariableNameMap
&
outputs
,
const
AttributeMap
&
attrs
,
const
Scope
&
scope
)
{
input_x_
=
InputXFrom
<
LoDTensor
>
(
inputs
,
scope
);
input_bias_
=
InputBiasFrom
<
framework
::
LoDTensor
>
(
inputs
,
scope
);
out_
=
OutFrom
<
LoDTensor
>
(
outputs
,
scope
);
inplace_
=
GetAttr
<
bool
>
(
"inplace"
,
attrs
);
has_bias_
=
GetAttr
<
bool
>
(
"has_bias"
,
attrs
);
scales_
=
GetAttr
<
vector
<
float
>>
(
"scales"
,
attrs
);
biases_
=
GetAttr
<
vector
<
float
>>
(
"biases"
,
attrs
);
}
const
Tensor
*
InputX
()
const
{
return
input_x_
;
}
const
Tensor
*
InputBias
()
const
{
return
input_bias_
;
}
Tensor
*
Out
()
const
{
return
out_
;
}
const
bool
&
Inplace
()
const
{
return
inplace_
;
}
const
bool
&
HasBias
()
const
{
return
has_bias_
;
}
const
vector
<
float
>
&
Scales
()
const
{
return
scales_
;
}
const
vector
<
float
>
&
Biases
()
const
{
return
biases_
;
}
private:
Tensor
*
input_x_
;
Tensor
*
input_bias_
;
Tensor
*
out_
;
bool
inplace_
;
bool
has_bias_
;
vector
<
float
>
scales_
;
vector
<
float
>
biases_
;
};
#endif
#ifdef SLICE_OP
class
SliceParam
:
public
OpParam
{
public:
SliceParam
(
const
VariableNameMap
&
inputs
,
const
VariableNameMap
&
outputs
,
const
AttributeMap
&
attrs
,
const
Scope
&
scope
)
{
input_x_
=
InputXFrom
<
LoDTensor
>
(
inputs
,
scope
);
input_shape_
=
InputShapeFrom
<
LoDTensor
>
(
inputs
,
scope
);
out_
=
OutFrom
<
LoDTensor
>
(
outputs
,
scope
);
axis_
=
GetAttr
<
int
>
(
"axis"
,
attrs
);
slice_points_
=
GetAttr
<
vector
<
int
>>
(
"slice_points"
,
attrs
);
inplace_
=
GetAttr
<
bool
>
(
"inplace"
,
attrs
);
}
const
Tensor
*
InputX
()
const
{
return
input_x_
;
}
const
Tensor
*
InputShape
()
const
{
return
input_shape_
;
}
Tensor
*
Out
()
const
{
return
out_
;
}
const
int
&
Axis
()
const
{
return
axis_
;
}
const
vector
<
int
>
&
SlicePoints
()
const
{
return
slice_points_
;
}
const
bool
&
Inplace
()
const
{
return
inplace_
;
}
private:
Tensor
*
input_x_
;
Tensor
*
input_shape_
;
Tensor
*
out_
;
int
axis_
;
vector
<
int
>
slice_points_
;
bool
inplace_
;
};
class
SliceParam
:
public
OpParam
{
public:
SliceParam
(
const
VariableNameMap
&
inputs
,
const
VariableNameMap
&
outputs
,
const
AttributeMap
&
attrs
,
const
Scope
&
scope
)
{
input_x_
=
InputXFrom
<
LoDTensor
>
(
inputs
,
scope
);
input_shape_
=
InputShapeFrom
<
LoDTensor
>
(
inputs
,
scope
);
out_
=
OutFrom
<
LoDTensor
>
(
outputs
,
scope
);
axis_
=
GetAttr
<
int
>
(
"axis"
,
attrs
);
slice_points_
=
GetAttr
<
vector
<
int
>>
(
"slice_points"
,
attrs
);
inplace_
=
GetAttr
<
bool
>
(
"inplace"
,
attrs
);
}
const
Tensor
*
InputX
()
const
{
return
input_x_
;
}
const
Tensor
*
InputShape
()
const
{
return
input_shape_
;
}
Tensor
*
Out
()
const
{
return
out_
;
}
const
int
&
Axis
()
const
{
return
axis_
;
}
const
vector
<
int
>
&
SlicePoints
()
const
{
return
slice_points_
;
}
const
bool
&
Inplace
()
const
{
return
inplace_
;
}
private:
Tensor
*
input_x_
;
Tensor
*
input_shape_
;
Tensor
*
out_
;
int
axis_
;
vector
<
int
>
slice_points_
;
bool
inplace_
;
};
#endif
#ifdef RESIZE_OP
// Parameter bundle for the Resize operator.
//
// Fix: the original class body contained the constructor, every accessor, and
// the private member list twice (a diff-serialization artifact). Duplicate
// member/constructor declarations are redefinition errors in C++; each is now
// declared exactly once.
class ResizeParam : public OpParam {
 public:
  ResizeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
              const AttributeMap &attrs, const Scope &scope) {
    // Resolve tensors by name from the scope (non-owning pointers).
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    input_shape_ = InputShapeFrom<LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    // Resize configuration attributes.
    is_pyramid_test_ = GetAttr<bool>("is_pyramid_test", attrs);
    height_ = GetAttr<int>("height", attrs);
    width_ = GetAttr<int>("width", attrs);
    out_height_scale_ = GetAttr<float>("out_height_scale", attrs);
    out_width_scale_ = GetAttr<float>("out_width_scale", attrs);
  }

  // Input tensor to be resized.
  const Tensor *InputX() const { return input_x_; }
  // Optional tensor carrying the target shape.
  const Tensor *InputShape() const { return input_shape_; }
  // Output tensor (mutable so the kernel can write the result).
  Tensor *Out() const { return out_; }
  // When true the target size is derived from scales instead of fixed h/w
  // (presumably — semantics live in the kernel; confirm against
  // resize_kernel).
  const bool &IsPyramidTest() const { return is_pyramid_test_; }
  // Fixed output height/width in pixels.
  const int &Height() const { return height_; }
  const int &Width() const { return width_; }
  // Scale factors applied to the input height/width.
  const float &OutHeightScale() const { return out_height_scale_; }
  const float &OutWidthScale() const { return out_width_scale_; }

 private:
  Tensor *input_x_;      // non-owning; lives in the scope
  Tensor *input_shape_;  // non-owning; lives in the scope
  Tensor *out_;          // non-owning; lives in the scope
  bool is_pyramid_test_;
  int height_;
  int width_;
  float out_height_scale_;
  float out_width_scale_;
};
#endif
#ifdef RELU_OP
/*
* @b op 层实例化好这个 param 传递给 kernel 层使用
...
...
@@ -871,22 +870,22 @@ class ReluParam : public OpParam {
#ifdef PRELU_OP
// Parameter bundle for the PRelu operator.
//
// Fix: the original class body declared the constructor, the accessors, and
// the private members twice (diff-serialization artifact); duplicate
// declarations inside one class are redefinition errors. Everything is now
// declared exactly once.
class PReluParam : public OpParam {
 public:
  PReluParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
             const AttributeMap &attrs, const Scope &scope) {
    // Resolve operand tensors from the scope (non-owning pointers).
    input_x_ = InputXFrom<LoDTensor>(inputs, scope);
    out_ = OutFrom<LoDTensor>(outputs, scope);
    // Per-channel negative slopes for the PRelu activation.
    slopes_ = GetAttr<vector<float>>("slopes", attrs);
  }

  // Activation input.
  const Tensor *InputX() const { return input_x_; }
  // Activation output (mutable so the kernel can write into it).
  Tensor *Out() const { return out_; }
  // Negative-side slope coefficients.
  const vector<float> &Slopes() const { return slopes_; }

 private:
  Tensor *input_x_;  // non-owning; lives in the scope
  Tensor *out_;      // non-owning; lives in the scope
  vector<float> slopes_;
};
#endif
...
...
src/operators/prelu_op.cpp
浏览文件 @
8735c538
...
...
@@ -16,15 +16,15 @@ limitations under the License. */
#include "operators/prelu_op.h"
namespace paddle_mobile {
namespace operators {

// PRelu is element-wise, so the output shape is exactly the input shape.
//
// Fix: the `operators` namespace body (InferShape definition and the explicit
// CPU instantiation) appeared twice in the original (diff-serialization
// artifact), which would redefine the template; it is now emitted once.
template <typename Dtype, typename T>
void PReluOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}

// Explicit instantiation for the CPU float kernel.
template class PReluOp<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile
/*
...
...
src/operators/prelu_op.h
浏览文件 @
8735c538
...
...
@@ -23,32 +23,31 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

// Operator wrapper binding PReluParam to PReluKernel.
//
// Fix: the class (and the surrounding using-declaration) was declared twice
// inside the namespace in the original (diff-serialization artifact), which
// is a redefinition error; it is now declared once.
template <typename DeviceType, typename T>
class PReluOp
    : public framework::OperatorWithKernel<
          DeviceType, PReluParam, operators::PReluKernel<DeviceType, T>> {
 public:
  PReluOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<
            DeviceType, PReluParam, operators::PReluKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  // Also expose the base-class constructors.
  using framework::OperatorWithKernel<
      DeviceType, PReluParam,
      operators::PReluKernel<DeviceType, T>>::OperatorWithKernel;

  // Sets the output dims; defined in prelu_op.cpp.
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile
#endif
src/operators/resize_op.cpp
浏览文件 @
8735c538
...
...
@@ -17,15 +17,15 @@ limitations under the License. */
#include "operators/resize_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {

// Output dims are computed from the resize attributes (fixed size or scales)
// by CalOutputShape, then applied to the output tensor.
//
// Fix: the `operators` namespace body was duplicated in the original
// (diff-serialization artifact), redefining the template; emitted once now.
template <typename Dtype, typename T>
void ResizeOp<Dtype, T>::InferShape() const {
  auto out_dims = CalOutputShape(this->param_);
  this->param_.Out()->Resize(out_dims);
}

// Explicit instantiation for the CPU float kernel.
template class ResizeOp<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
...
...
src/operators/resize_op.h
浏览文件 @
8735c538
...
...
@@ -23,30 +23,30 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

// Operator wrapper binding ResizeParam to ResizeKernel.
//
// Fix: the class was declared twice inside the namespace in the original
// (diff-serialization artifact) — a redefinition error; declared once now.
// NOTE(review): `attrs` is taken by value here while the sibling ops take it
// by const reference — kept as-is to preserve the interface; worth unifying.
template <typename DeviceType, typename T>
class ResizeOp
    : public framework::OperatorWithKernel<
          DeviceType, ResizeParam, operators::ResizeKernel<DeviceType, T>> {
 public:
  ResizeOp(const std::string &type, const VariableNameMap &inputs,
           const VariableNameMap &outputs, const framework::AttributeMap attrs,
           std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<
            DeviceType, ResizeParam, operators::ResizeKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  // Also expose the base-class constructors.
  using framework::OperatorWithKernel<
      DeviceType, ResizeParam,
      operators::ResizeKernel<DeviceType, T>>::OperatorWithKernel;

  // Sets the output dims; defined in resize_op.cpp.
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile
#endif
src/operators/scale_op.cpp
浏览文件 @
8735c538
...
...
@@ -17,15 +17,15 @@ limitations under the License. */
#include "operators/scale_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {

// Scale is element-wise, so the output shape equals the input shape.
//
// Fix: the `operators` namespace body was duplicated in the original
// (diff-serialization artifact), redefining the template; emitted once now.
template <typename Dtype, typename T>
void ScaleOp<Dtype, T>::InferShape() const {
  auto input_dims = this->param_.InputX()->dims();
  this->param_.Out()->Resize(input_dims);
}

// Explicit instantiation for the CPU float kernel.
template class ScaleOp<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
...
...
src/operators/scale_op.h
浏览文件 @
8735c538
...
...
@@ -23,32 +23,31 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

// Operator wrapper binding ScaleParam to ScaleKernel.
//
// Fix: the class was declared twice inside the namespace in the original
// (diff-serialization artifact) — a redefinition error; declared once now.
template <typename DeviceType, typename T>
class ScaleOp
    : public framework::OperatorWithKernel<
          DeviceType, ScaleParam, operators::ScaleKernel<DeviceType, T>> {
 public:
  ScaleOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<
            DeviceType, ScaleParam, operators::ScaleKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  // Also expose the base-class constructors.
  using framework::OperatorWithKernel<
      DeviceType, ScaleParam,
      operators::ScaleKernel<DeviceType, T>>::OperatorWithKernel;

  // Sets the output dims; defined in scale_op.cpp.
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile
#endif
src/operators/slice_op.cpp
浏览文件 @
8735c538
...
...
@@ -17,15 +17,14 @@ limitations under the License. */
#include "operators/slice_op.h"
#include <vector>
namespace paddle_mobile {
namespace operators {

// Shape inference is not implemented yet for Slice.
//
// Fix: the `operators` namespace body was duplicated in the original
// (diff-serialization artifact), redefining the template; emitted once now.
template <typename Dtype, typename T>
void SliceOp<Dtype, T>::InferShape() const {
  /// todo: add InputShape() detection.
}

// Explicit instantiation for the CPU float kernel.
template class SliceOp<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile
namespace
ops
=
paddle_mobile
::
operators
;
...
...
src/operators/slice_op.h
浏览文件 @
8735c538
...
...
@@ -23,32 +23,31 @@ limitations under the License. */
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {

using paddle_mobile::framework::Tensor;

// Operator wrapper binding SliceParam to SliceKernel.
//
// Fix: the class was declared twice inside the namespace in the original
// (diff-serialization artifact) — a redefinition error; declared once now.
template <typename DeviceType, typename T>
class SliceOp
    : public framework::OperatorWithKernel<
          DeviceType, SliceParam, operators::SliceKernel<DeviceType, T>> {
 public:
  SliceOp(const std::string &type, const VariableNameMap &inputs,
          const VariableNameMap &outputs, const framework::AttributeMap &attrs,
          std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<
            DeviceType, SliceParam, operators::SliceKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}

  // Also expose the base-class constructors.
  using framework::OperatorWithKernel<
      DeviceType, SliceParam,
      operators::SliceKernel<DeviceType, T>>::OperatorWithKernel;

  // Sets the output dims; defined in slice_op.cpp.
  void InferShape() const override;

 protected:
};

}  // namespace operators
}  // namespace paddle_mobile
#endif
test/operators/test_prelu_op.cpp
浏览文件 @
8735c538
...
...
@@ -17,42 +17,42 @@ limitations under the License. */
#include "operators/prelu_op.h"
// Smoke test for the PRelu operator: loads the resnet model, runs the
// "prelu" op on a random 1x2x3x4 input, and logs the outputs.
//
// Fix: every statement in the original main() appeared twice
// (diff-serialization artifact) — duplicate local declarations would not
// compile; each statement now appears once.
int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(g_resnet);
  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                        "program file read fail");

  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::PReluOp<paddle_mobile::CPU, float>>
      executor(program, "prelu");

  // 1. input_tensors;
  vector<Tensor> input_tensors;

  Tensor input1;
  auto input1_data = CreateInput<float>(&input1, {1, 2, 3, 4}, -1, 1);
  input_tensors.push_back(input1);

  // 2. input_names
  vector<string> input_names({
      "batch_norm_0.tmp_2",
  });

  // 3. output_names
  vector<string> output_names({"batch_norm_0.tmp_3"});

  // 4. out_dims;
  vector<DDim> out_ddims;
  auto out_ddim = paddle_mobile::framework::make_ddim({1, 2, 3, 4});
  out_ddims.push_back(out_ddim);

  auto output = executor.Predict<LoDTensor>(input_tensors, input_names,
                                            output_names, out_ddims);

  auto output0_data = output[0]->data<float>();

  for (int j = 0; j < output[0]->numel(); ++j) {
    DLOG << " value of output: " << output0_data[j];
  }
  return 0;
}
test/operators/test_resize_op.cpp
浏览文件 @
8735c538
...
...
@@ -16,32 +16,32 @@ limitations under the License. */
#include "operators/resize_op.h"
// Smoke test for the Resize operator: loads the mobilenet-ssd model, runs the
// "resize" op on a random 2x3x3x2 input, and logs input and output values.
//
// Fix: every statement in the original main() appeared twice
// (diff-serialization artifact) — duplicate local declarations would not
// compile; each statement now appears once.
int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string(g_mobilenet_ssd));
  if (program.originProgram == nullptr) {
    DLOG << "program read file";
  }
  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::ResizeOp<paddle_mobile::CPU, float>>
      executor(program, "resize");

  paddle_mobile::framework::Tensor input;
  SetupTensor<float>(&input, {2, 3, 3, 2}, static_cast<float>(0),
                     static_cast<float>(1));
  auto input_ptr = input.data<float>();

  auto out_ddim = paddle_mobile::framework::make_ddim({2, 9, 2});
  auto output =
      executor.Predict(input, "transpose_0.tmp_0", "reshape_0.tmp_0", out_ddim);
  auto *output_ptr = output->data<float>();

  DLOG << "input : ";
  for (int j = 0; j < input.numel(); ++j) {
    DLOG << " index " << j << " : " << input_ptr[j];
  }

  DLOG << "output : ";
  for (int j = 0; j < output->numel(); ++j) {
    DLOG << " index " << j << " : " << output_ptr[j];
  }
  return 0;
}
test/operators/test_slice_op.cpp
浏览文件 @
8735c538
...
...
@@ -15,4 +15,4 @@ limitations under the License. */
#include "../test_include.h"
#include "operators/slice_op.h"
// Placeholder test for the Slice operator — no assertions yet.
//
// Fix: the original span contained both the pre- and post-diff definitions of
// main() (diff-serialization artifact); only the post-diff one is kept, since
// two definitions of main() would not link.
int main() {}
tools/build.sh
浏览文件 @
8735c538
#!/usr/bin/env bash
export
ANDROID_NDK
=
/Users/tianfei01/workspace/Android/NDK/android-ndk-r16b
build_for_mac
()
{
if
[
!
`
which brew
`
]
;
then
...
...
@@ -14,9 +13,6 @@ build_for_mac() {
return
fi
fi
alias
gcc
=
'gcc-5'
export
CC
=
gcc-5
export
CXX
=
g++-5
PLATFORM
=
"x86"
MODE
=
"Release"
BUILD_DIR
=
../build/release/
"
${
PLATFORM
}
"
...
...
@@ -36,8 +32,8 @@ build_for_mac() {
build_for_android
()
{
#rm -rf "../build"
if
[
-z
"
${
ANDROID_NDK
}
"
]
;
then
echo
"
ANDROID_NDK
not found!"
if
[
-z
"
${
NDK_ROOT
}
"
]
;
then
echo
"
NDK_ROOT
not found!"
exit
-1
fi
...
...
@@ -60,12 +56,10 @@ build_for_android() {
MODE
=
"Release"
ANDROID_PLATFORM_VERSION
=
"android-15"
#TOOLCHAIN_FILE="./tools/android-cmake/android.toolchain.cmake"
TOOLCHAIN_FILE
=
"
${
ANDROID_NDK
}
/build/cmake/android.toolchain.cmake"
ANDROID_PLATFORM_VERSION
=
"android-22"
TOOLCHAIN_FILE
=
"./tools/android-cmake/android.toolchain.cmake"
ANDROID_ARM_MODE
=
"arm"
if
[
$#
-eq
1
]
;
then
NET
=
$1
cmake ..
\
-B
"../build/release/
${
PLATFORM
}
"
\
-DANDROID_ABI
=
"
${
ABI
}
"
\
...
...
@@ -75,7 +69,7 @@ build_for_android() {
-DCMAKE_CXX_FLAGS
=
"
${
CXX_FLAGS
}
"
\
-DANDROID_STL
=
c++_static
\
-DANDROID
=
true
\
-D
"
${
NET
}
=true"
\
-D
NET
=
$1
\
-D
"
${
ARM_PLATFORM
}
"
=
true
else
...
...
@@ -95,7 +89,7 @@ build_for_android() {
}
build_for_ios
()
{
rm
-rf
"../build"
#
rm -rf "../build"
PLATFORM
=
"ios"
MODE
=
"Release"
BUILD_DIR
=
../build/release/
"
${
PLATFORM
}
"
...
...
@@ -104,7 +98,6 @@ build_for_ios() {
CXX_FLAGS
=
"-fobjc-abi-version=2 -fobjc-arc -std=gnu++14 -stdlib=libc++ -isysroot
${
CMAKE_OSX_SYSROOT
}
"
mkdir
-p
"
${
BUILD_DIR
}
"
if
[
$#
-eq
1
]
;
then
NET
=
$1
cmake ..
\
-B
"
${
BUILD_DIR
}
"
\
-DCMAKE_BUILD_TYPE
=
"
${
MODE
}
"
\
...
...
@@ -112,7 +105,7 @@ build_for_ios() {
-DIOS_PLATFORM
=
OS
\
-DCMAKE_C_FLAGS
=
"
${
C_FLAGS
}
"
\
-DCMAKE_CXX_FLAGS
=
"
${
CXX_FLAGS
}
"
\
-D
"
${
NET
}
"
=
true
\
-D
NET
=
$1
\
-DIS_IOS
=
"true"
else
cmake ..
\
...
...
@@ -126,6 +119,9 @@ build_for_ios() {
fi
cd
"
${
BUILD_DIR
}
"
make
-j
8
cd
./build
# 生成符号表
ranlib
*
.a
}
build_error
()
{
...
...
@@ -134,16 +130,12 @@ build_error() {
if
[
$#
-lt
1
]
;
then
echo
"error: target missing!"
echo
"available targets:
mac|linux|
ios|android"
echo
"sample usage: ./build.sh
mac
"
echo
"available targets: ios|android"
echo
"sample usage: ./build.sh
android
"
else
if
[
$#
-eq
2
]
;
then
if
[
$2
!=
"googlenet"
-a
$2
!=
"mobilenet"
-a
$2
!=
"yolo"
-a
$2
!=
"squeezenet"
-a
$2
!=
"resnet"
]
;
then
if
[
$1
=
"mac"
]
;
then
build_for_mac
elif
[
$1
=
"linux"
]
;
then
build_for_linux
elif
[
$1
=
"android"
]
;
then
if
[
$1
=
"android"
]
;
then
build_for_android
elif
[
$1
=
"ios"
]
;
then
build_for_ios
...
...
@@ -151,11 +143,7 @@ else
build_error
fi
else
if
[
$1
=
"mac"
]
;
then
build_for_mac
$2
elif
[
$1
=
"linux"
]
;
then
build_for_linux
$2
elif
[
$1
=
"android"
]
;
then
if
[
$1
=
"android"
]
;
then
build_for_android
$2
elif
[
$1
=
"ios"
]
;
then
build_for_ios
$2
...
...
@@ -164,11 +152,7 @@ else
fi
fi
else
if
[
$1
=
"mac"
]
;
then
build_for_mac
elif
[
$1
=
"linux"
]
;
then
build_for_linux
elif
[
$1
=
"android"
]
;
then
if
[
$1
=
"android"
]
;
then
build_for_android
elif
[
$1
=
"ios"
]
;
then
build_for_ios
...
...
@@ -176,4 +160,4 @@ else
build_error
fi
fi
fi
fi
\ No newline at end of file
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录