PaddlePaddle / Paddle-Lite
Commit 896f52f2

Authored May 31, 2018 by WangLiu; committed by GitHub on May 31, 2018.

Merge pull request #324 from cocodark/develop

add impl for executor'predict

Parents: 21720ed1, a9853979
Showing 10 changed files with 81 additions and 40 deletions (+81, -40).
Changed files:

- CMakeLists.txt (+2, -1)
- scripts/push2android.sh (+14, -0, new file)
- src/common/enforce.h (+3, -3)
- src/framework/operator.cpp (+1, -13)
- src/framework/operator.h (+12, -0)
- src/io.cpp (+25, -9)
- src/io.h (+6, -4)
- src/operators/math/softmax.cpp (+7, -1)
- test/net/test_googlenet.cpp (+2, -5)
- test/net/test_mobilenet.cpp (+9, -4)
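Taken together, the changes turn Executor's Predict into a real inference path: run every op, then return the last op's output. A minimal end-to-end sketch, adapted from test/net/test_googlenet.cpp as changed in this commit; the include paths and the g_googlenet / g_test_image_1x3x224x224 / GetInput helpers are assumed to come from the repo's test utilities:

```cpp
#include <vector>

#include "../test_helper.h"   // assumed location of the test helpers
#include "../test_include.h"  // assumed: pulls in Loader/Executor

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(g_googlenet, false);
  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, false);

  // A 1x3x224x224 float input, loaded by the test helper.
  std::vector<float> input;
  std::vector<int64_t> dims{1, 3, 224, 224};
  GetInput<float>(g_test_image_1x3x224x224, &input, dims);

  // New in this commit: Predict returns the flattened values of the last
  // op's first output instead of an empty vector.
  auto result = executor.Predict(input, dims);
  return 0;
}
```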
CMakeLists.txt

```diff
 cmake_minimum_required(VERSION 3.0)
 project(paddle-mobile)
-add_definitions(-DPADDLE_MOBILE_DEBUG="true")
+add_definitions(-DPADDLE_MOBILE_DEBUG)
+add_definitions(-DENABLE_EXCEPTION)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 set(CMAKE_BUILD_TYPE RelWithDebInfo)
```
scripts/push2android.sh (new file, mode 100644)

```diff
+#!/usr/bin/env sh
+push_fn () {
+MODELS_PATH="../test/models/*"
+EXE_FILE="../test/build/*"
+EXE_DIR="data/local/tmp/bin"
+MODELS_DIR="data/local/tmp/models"
+LIB_PATH="../build/release/arm-v7a/build/*"
+adb push ${EXE_FILE} ${EXE_DIR}
+adb push ${LIB_PATH} ${EXE_DIR}
+adb push ${MODELS_PATH} ${MODELS_DIR}
+echo "test files sync completed"
+}
+push_fn
```
src/common/enforce.h

```diff
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
 
-#ifdef PADDLE_MOBILE_DEBUG
+#ifdef ENABLE_EXCEPTION
 #include <stdio.h>
 #include <exception>
 #include <sstream>
@@ -25,7 +25,7 @@ limitations under the License. */
 namespace paddle_mobile {
 
-#ifdef PADDLE_MOBILE_DEBUG
+#ifdef ENABLE_EXCEPTION
 struct PaddleMobileException : public std::exception {
   const std::string exception_prefix = "paddle mobile C++ Exception: \n";
   std::string message;
@@ -64,7 +64,7 @@ struct PaddleMobileException : public std::exception {
 }
 #else
 #define PADDLE_MOBILE_THROW_EXCEPTION(...)
-#define PADDLE_MOBILE_ASSERT(stat, ...)
+#define PADDLE_MOBILE_ENFORCE(stat, ...)
 #endif
 }  // namespace paddle_mobile
```
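The net effect: the exception machinery is now gated on ENABLE_EXCEPTION (newly defined in CMakeLists.txt above) rather than PADDLE_MOBILE_DEBUG, and the disabled-mode stub is renamed from PADDLE_MOBILE_ASSERT to PADDLE_MOBILE_ENFORCE so that call sites compile in both configurations. A minimal sketch of such a call site, mirroring the one this commit adds in src/io.cpp:

```cpp
#include <string>
#include <vector>

#include "common/enforce.h"  // path relative to src/, as in this repo

int main() {
  std::vector<std::string> out_keys{"Out"};  // hypothetical output key
  // With ENABLE_EXCEPTION defined, a failed check throws
  // paddle_mobile::PaddleMobileException; otherwise the macro is a no-op.
  PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
  return 0;
}
```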
src/framework/operator.cpp

```diff
@@ -28,18 +28,6 @@ vector<string> OperatorBase<Dtype>::GetOutKeys() const {
   return it->second.second;
 }
 
-template <typename T>
-static T *GetVarValue(const string &key, const VariableNameMap &var_map,
-                      const Scope &scope) {
-  auto var_vec = var_map.at(key);
-  if (!var_vec.empty()) {
-    auto var = scope.FindVar(var_vec[0]);
-    return var->GetMutable<T>();
-  } else {
-    return nullptr;
-  }
-}
-
 template <typename Dtype>
 OperatorBase<Dtype>::OperatorBase(const std::string &type,
                                   const VariableNameMap &inputs,
@@ -60,7 +48,7 @@ void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
 template <typename Dtype>
 void OperatorBase<Dtype>::Run() const {
   RunImpl();
-#ifdef PADDLE_MOBILE_DEBUG
+#if (PADDLE_MOBILE_DEBUG)
   vector<string> output_keys = GetOutKeys();
   for (const auto key : output_keys) {
     Tensor *out_ = GetVarValue<framework::LoDTensor>(key, outputs_, *scope_);
```
src/framework/operator.h

```diff
@@ -39,6 +39,18 @@ namespace framework {
 using std::string;
 using std::vector;
 
+template <typename T>
+static T *GetVarValue(const string &key, const VariableNameMap &var_map,
+                      const Scope &scope) {
+  auto var_vec = var_map.at(key);
+  if (!var_vec.empty()) {
+    auto var = scope.FindVar(var_vec[0]);
+    return var->GetMutable<T>();
+  } else {
+    return nullptr;
+  }
+}
+
 template <typename Dtype>
 class OperatorBase : PaddleMobileObject {
  public:
```
src/io.cpp

```diff
@@ -371,31 +371,47 @@ void Executor<Dtype, P>::InitMemory() {
 }
 
 template <typename Dtype, Precision P>
-void Executor<Dtype, P>::Predict(const framework::Tensor &t, int block_id) {
+std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
+    const framework::Tensor &t) {
   framework::Variable *g_feed_value = program_.scope->Var("feed");
   framework::Tensor *feed_tensor =
       g_feed_value->GetMutable<framework::LoDTensor>();
   feed_tensor->Resize(t.dims());
   feed_tensor->ShareDataWith(t);
   std::shared_ptr<framework::BlockDesc> to_predict_block =
-      to_predict_program_->Block(block_id);
+      to_predict_program_->Block(0);
   for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
     auto op = ops_of_block_[*to_predict_block.get()][j];
     op->Run();
   }
+  auto ops = ops_of_block_[*to_predict_program_->Block(0)];
+  auto last_op = ops.rbegin();
+  auto output_map = (*last_op)->Outputs();
+  std::vector<std::string> out_keys = (*last_op)->GetOutKeys();
+  PADDLE_MOBILE_ENFORCE(out_keys.size() > 0, "the last op contains no output");
+  framework::LoDTensor *output_tensor =
+      framework::GetVarValue<framework::LoDTensor>(out_keys[0], output_map,
+                                                   *(program_.scope));
+  return std::shared_ptr<framework::Tensor>(output_tensor);
+}
+
+template <typename Dtype, Precision P>
+std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
+    const framework::Tensor &t, int block_id) {
+  return Predict(t);
 }
 
 template <typename Dtype, Precision P>
 std::vector<typename Executor<Dtype, P>::Ptype> Executor<Dtype, P>::Predict(
     const std::vector<Ptype> &input, const std::vector<int64_t> &dims) {
   framework::Tensor tensor(input, framework::make_ddim(dims));
-  Predict(tensor, 0);
-  framework::Variable *g_feed_value = program_.scope->Var("col");
-  auto feed_tensor = g_feed_value->GetMutable<framework::Tensor>();
-  return {};
+  std::shared_ptr<framework::Tensor> output_tensor = Predict(tensor, 0);
+  Executor<Dtype, P>::Ptype *output_ptr =
+      output_tensor->data<typename Executor<Dtype, P>::Ptype>();
+  std::vector<typename Executor<Dtype, P>::Ptype> result_vector;
+  for (int j = 0; j < output_tensor->numel(); ++j) {
+    result_vector.push_back(output_ptr[j]);
+  }
+  return result_vector;
 }
 
 template class Executor<CPU, Precision::FP32>;
```
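The const-Tensor overload is now the workhorse: it shares `t` into the scope's "feed" variable, runs every op of block 0, and wraps the first output of the last op in a shared_ptr. The (tensor, block_id) overload simply forwards to it (the block_id argument is effectively ignored), and the (input, dims) overload flattens the result into a std::vector<Ptype>. A hedged sketch of calling the tensor overload directly, assuming `program` was loaded as in the tests and the input shape matches the model:

```cpp
// Hypothetical call site; `program` is assumed to come from
// paddle_mobile::Loader<paddle_mobile::CPU>::Load(...) as in the tests.
paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, false);

paddle_mobile::framework::Tensor t;
// ... resize `t` and fill it with input data (e.g. 1x3x224x224 floats) ...

std::shared_ptr<paddle_mobile::framework::Tensor> out = executor.Predict(t);
```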
src/io.h

```diff
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include <memory.h>
+#include <map>
 #include <string>
 #include <vector>
@@ -44,24 +45,25 @@ class Executor {
  public:
   typedef typename PrecisionTrait<P>::ptype Ptype;
 
-  Executor() = default;
-
   Executor(const framework::Program<Dtype> p, int batch_size = 1,
            bool use_optimize = true);
 
-  //  std::shared_ptr<framework::Tensor> Predict(framework::Tensor &t);
+  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);
 
   std::vector<Ptype> Predict(const std::vector<Ptype> &input,
                              const std::vector<int64_t> &dims);
 
  protected:
+  Executor() = default;
+
   void InitMemory();
   void LoadMemory(const framework::VarDesc var_desc,
                   framework::LoDTensor *tensor, const std::string &file_path);
   framework::Program<Dtype> program_;
   int batch_size_ = 1;
   std::shared_ptr<framework::ProgramDesc> to_predict_program_;
-  void Predict(const framework::Tensor &t, int block_id);
+  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
+                                             int block_id);
   std::map<framework::BlockDesc,
            std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
       ops_of_block_;
```
src/operators/math/softmax.cpp

```diff
@@ -136,9 +136,15 @@ class SoftmaxFuntor<CPU, T> {
  public:
   void operator()(const framework::Tensor *X, framework::Tensor *Y) {
+    const DDim dDim = X->dims();
+    for (int i = 0; i < dDim[0]; ++i) {
+      framework::Tensor sub_X = X->Slice(i, i + 1);
+      framework::Tensor sub_Y = Y->Slice(i, i + 1);
 #if __ARM_NEON
-    SoftmaxCacl(X, Y);
+      SoftmaxCacl(&sub_X, &sub_Y);
 #endif
+    }
   }
 };
```
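Previously the functor ran SoftmaxCacl once across the whole tensor; it now slices the batch dimension and normalizes each sample on its own, which matters once the tests feed batches larger than 1 (see test_mobilenet.cpp below). For intuition, a stand-alone per-row softmax over a flat [batch, n] buffer — a hypothetical rendering, not the repo's Tensor/NEON code:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Softmax each row of a [batch, n] buffer independently, mirroring what the
// sliced loop now does per sample.
void softmax_per_sample(std::vector<float> &data, std::size_t batch,
                        std::size_t n) {
  for (std::size_t i = 0; i < batch; ++i) {
    float *row = data.data() + i * n;  // the analogue of Slice(i, i + 1)
    const float max_v = *std::max_element(row, row + n);  // numeric stability
    float sum = 0.f;
    for (std::size_t j = 0; j < n; ++j) {
      row[j] = std::exp(row[j] - max_v);
      sum += row[j];
    }
    for (std::size_t j = 0; j < n; ++j) {
      row[j] /= sum;
    }
  }
}
```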
test/net/test_googlenet.cpp

```diff
@@ -18,20 +18,17 @@ limitations under the License. */
 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
-  //  ../../../test/models/googlenet
-  //  ../../../test/models/mobilenet
   auto time1 = time();
   auto program = loader.Load(g_googlenet, false);
   auto time2 = time();
-  DLOG << "load cost :" << time_diff(time1, time1) << "ms";
+  DLOG << "load cost :" << time_diff(time1, time2) << "ms\n";
   paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, false);
   std::vector<float> input;
   std::vector<int64_t> dims{1, 3, 224, 224};
   GetInput<float>(g_test_image_1x3x224x224, &input, dims);
   auto time3 = time();
   executor.Predict(input, dims);
   auto time4 = time();
-  DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
+  DLOG << "predict cost :" << time_diff(time3, time4) << "ms\n";
   return 0;
 }
```
test/net/test_mobilenet.cpp

```diff
@@ -22,17 +22,22 @@ int main() {
   auto program = loader.Load(g_mobilenet, false);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time1) << "ms";
-  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, false);
+  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 2, false);
 
-  std::vector<int64_t> dims{1, 3, 224, 224};
+  std::vector<int64_t> dims{2, 3, 224, 224};
   Tensor input_tensor;
-  SetupTensor<float>(&input_tensor, {1, 3, 224, 224}, static_cast<float>(0),
+  SetupTensor<float>(&input_tensor, {2, 3, 224, 224}, static_cast<float>(0),
                      static_cast<float>(1));
 
   std::vector<float> input(input_tensor.data<float>(),
                            input_tensor.data<float>() + input_tensor.numel());
   auto time3 = time();
-  executor.Predict(input, dims);
+  auto vec_result = executor.Predict(input, dims);
+  float sum = 0;
+  for (const auto item : vec_result) {
+    sum += item;
+  }
+  DLOG << "mobilenet output sum =" << sum;
   auto time4 = time();
   DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
   return 0;
```