PaddlePaddle / Paddle-Lite
Commit 8a3a01ee — authored May 26, 2018 by liuruilong
Commit message: replace protobuf with protobuf-c
Parent commit: f76986d9
Changes: showing 14 changed files with 398 additions and 256 deletions (+398 −256)

  src/framework/attribute.h               +78   −0
  src/framework/program/block_desc.cpp     +8   −5
  src/framework/program/block_desc.h       +3   −3
  src/framework/program/op_desc.cpp       +16  −16
  src/framework/program/op_desc.h          +2   −2
  src/framework/program/program_desc.cpp  +20   −4
  src/framework/program/program_desc.h     +4   −1
  src/framework/program/tensor_desc.cpp    +5   −0   (new file)
  src/framework/program/tensor_desc.h     +75   −0   (new file)
  src/framework/program/var_desc.cpp       +0   −1
  src/framework/program/var_desc.h       +110  −41
  src/io.cpp                              +75 −182
  src/io.h                                 +1   −1
  test/executor_for_test.h                 +1   −0
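The change is mechanical but pervasive: the generated C++ protobuf classes (framework.pb.h, proto::* types with .field() accessors and fields_size()) are replaced by protobuf-c's generated C structs (framework.pb-c.h, PaddleMobile__Framework__Proto__* types with plain members, n_xxx counts and pointer arrays). For orientation, here is a minimal sketch of the unpack / walk / free cycle the new loader code relies on. It is not part of the commit; only the unpack/free calls and the field names that actually appear in the diff below are assumed.

// Sketch only: walk a serialized ProgramDesc with the protobuf-c API used in this commit.
#include <cstdio>
#include <cstdlib>
#include "framework/framework.pb-c.h"

void DumpProgram(const uint8_t *buf, size_t size) {
  PaddleMobile__Framework__Proto__ProgramDesc *c_program =
      paddle_mobile__framework__proto__program_desc__unpack(NULL, size, buf);
  if (c_program == NULL) return;  // unpack failed
  for (size_t i = 0; i < c_program->n_blocks; ++i) {
    PaddleMobile__Framework__Proto__BlockDesc *block = c_program->blocks[i];
    printf("block %zu: %zu ops, %zu vars\n", i, block->n_ops, block->n_vars);
    for (size_t j = 0; j < block->n_ops; ++j) {
      printf("  op: %s\n", block->ops[j]->type);  // plain C string, not std::string
    }
  }
  // protobuf-c allocates the whole tree in unpack; it must be released explicitly.
  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
}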
src/framework/attribute.h (+78 −0)

@@ -15,8 +15,10 @@ limitations under the License. */

 #pragma once

 #include "common/log.h"
+#include "common/enforce.h"
 #include "common/variant.h"
 #include "framework/framework.pb.h"
+#include "framework/framework.pb-c.h"

 namespace paddle_mobile {
 namespace framework {

@@ -89,6 +91,82 @@ class Attribute {
     return attr;
   }

+  /*
+   * PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT = 0,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT = 1,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING = 2,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS = 3,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS = 4,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS = 5,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN = 6,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS = 7,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK = 8,
+     PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG = 9
+     PROTOBUF_C__FORCE_ENUM_TO_BE_INT_SIZE(PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE)
+   * */
+  static Attribute GetAttrValue(
+      PaddleMobile__Framework__Proto__OpDesc__Attr *attr_desc) {
+    //    std::cout << "begin get attr value" << std::endl;
+    Attribute attr;
+    switch (attr_desc->type) {
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEAN: {
+        attr.Set<bool>(attr_desc->b);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INT: {
+        attr.Set<int>(attr_desc->i);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOAT: {
+        attr.Set<float>(attr_desc->f);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRING: {
+        attr.Set<std::string>(std::string(attr_desc->s));
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BOOLEANS: {
+        std::vector<bool> val(attr_desc->n_bools);
+        for (int i = 0; i < attr_desc->n_bools; ++i) {
+          val[i] = attr_desc->bools[i];
+        }
+        attr.Set<std::vector<bool>>(val);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__INTS: {
+        std::vector<int> val(attr_desc->n_ints);
+        for (int i = 0; i < attr_desc->n_ints; ++i) {
+          val[i] = attr_desc->ints[i];
+        }
+        attr.Set<std::vector<int>>(val);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__FLOATS: {
+        std::vector<float> val(attr_desc->n_floats);
+        for (int i = 0; i < attr_desc->n_floats; ++i) {
+          val[i] = attr_desc->floats[i];
+        }
+        attr.Set<std::vector<float>>(val);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__STRINGS: {
+        std::vector<std::string> val(attr_desc->n_strings);
+        for (int i = 0; i < attr_desc->n_strings; ++i) {
+          val[i] = attr_desc->strings[i];
+        }
+        attr.Set<std::vector<std::string>>(val);
+        break;
+      }
+      case PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__LONG: {
+        attr.Set<int64_t>(attr_desc->l);
+        break;
+      }
+      default:
+        PADDLE_MOBILE_THROW_EXCEPTION("attr type not support");
+    }
+    return attr;
+  }
+
   Attribute() {}
   template <typename T, typename... Args>
   Attribute &Set(Args &&... args) {
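For context, the new GetAttrValue overload is consumed a few files below in op_desc.cpp. A condensed sketch of that call pattern; the map type here is an assumption mirroring OpDesc::attrs_, not additional commit code:

// Condensed from the op_desc.cpp hunk below; the attrs map type is assumed.
std::unordered_map<std::string, Attribute> attrs;
for (size_t k = 0; k < op_desc->n_attrs; ++k) {
  PaddleMobile__Framework__Proto__OpDesc__Attr *attr = op_desc->attrs[k];
  if (attr->type != PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK) {
    attrs[std::string(attr->name)] = Attribute::GetAttrValue(attr);  // BLOCK attrs are skipped
  }
}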
src/framework/program/block_desc.cpp (+8 −5)

@@ -33,14 +33,17 @@ std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const {
   return res;
 }

-BlockDesc::BlockDesc(const proto::BlockDesc &desc)
-    : index_(desc.idx()), parent_index_(desc.parent_idx()) {
-  for (const proto::VarDesc &var_desc : desc.vars()) {
-    vars_[var_desc.name()].reset(new VarDesc(var_desc));
+BlockDesc::BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc)
+    : index_(desc->idx), parent_index_(desc->idx) {
+  for (int i = 0; i < desc->n_vars; ++i) {
+    PaddleMobile__Framework__Proto__VarDesc *var_desc = desc->vars[i];
+    vars_[std::string(var_desc->name)] =
+        std::shared_ptr<VarDesc>(new VarDesc(var_desc));
   }
-  for (const proto::OpDesc &op_desc : desc.ops()) {
+  for (int j = 0; j < desc->n_ops; ++j) {
+    PaddleMobile__Framework__Proto__OpDesc *op_desc = desc->ops[j];
     ops_.emplace_back(new framework::OpDesc(op_desc));
   }
 }
 }  // namespace framework
src/framework/program/block_desc.h (+3 −3)

@@ -15,9 +15,10 @@ limitations under the License. */

 #pragma once

-#include "framework/framework.pb.h"
-#include "framework/paddle_mobile_object.h"
+#include "framework/framework.pb-c.h"
 #include "framework/program/op_desc.h"
 #include "framework/program/var_desc.h"
+#include "framework/paddle_mobile_object.h"

 namespace paddle_mobile {
 namespace framework {

@@ -26,8 +27,7 @@ class BlockDesc : PaddleMobileObject {
  public:
   friend class Node;
   friend class ProgramOptimize;
-  BlockDesc(const proto::BlockDesc &desc);
+  BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc);
   BlockDesc(const BlockDesc &block_desc)
       : index_(block_desc.index_), parent_index_(block_desc.parent_index_) {
     for (auto &op_desc : block_desc.ops_) {
src/framework/program/op_desc.cpp (+16 −16)

@@ -20,28 +20,28 @@ limitations under the License. */

 namespace paddle_mobile {
 namespace framework {

-OpDesc::OpDesc(const proto::OpDesc &desc) : type_(desc.type()) {
-  for (int i = 0; i < desc.inputs_size(); ++i) {
-    const proto::OpDesc::Var &var = desc.inputs(i);
-    std::vector<std::string> &args = inputs_[var.parameter()];
-    int arg_size = var.arguments_size();
-    for (int j = 0; j < arg_size; ++j) {
-      args.push_back(var.arguments(j));
+OpDesc::OpDesc(PaddleMobile__Framework__Proto__OpDesc *desc) {
+  this->type_ = std::string(desc->type);
+  for (int i = 0; i < desc->n_inputs; ++i) {
+    PaddleMobile__Framework__Proto__OpDesc__Var *var = desc->inputs[i];
+    std::vector<std::string> &args = inputs_[std::string(var->parameter)];
+    for (int j = 0; j < var->n_arguments; ++j) {
+      args.emplace_back(std::string(var->arguments[j]));
     }
   }

-  for (int i = 0; i < desc.outputs_size(); ++i) {
-    const proto::OpDesc::Var &var = desc.outputs(i);
-    std::vector<std::string> &args = outputs_[var.parameter()];
-    int arg_size = var.arguments_size();
-    for (int j = 0; j < arg_size; ++j) {
-      args.push_back(var.arguments(j));
+  for (int i = 0; i < desc->n_outputs; ++i) {
+    PaddleMobile__Framework__Proto__OpDesc__Var *var = desc->outputs[i];
+    std::vector<std::string> &args = outputs_[std::string(var->parameter)];
+    for (int j = 0; j < var->n_arguments; ++j) {
+      args.emplace_back(std::string(var->arguments[j]));
     }
   }

-  for (const proto::OpDesc::Attr &attr : desc.attrs()) {
-    std::string attr_name = attr.name();
-    if (attr.type() != proto::AttrType::BLOCK) {
+  for (int k = 0; k < desc->n_attrs; ++k) {
+    PaddleMobile__Framework__Proto__OpDesc__Attr *attr = desc->attrs[k];
+    std::string attr_name(attr->name);
+    if (attr->type != PADDLE_MOBILE__FRAMEWORK__PROTO__ATTR_TYPE__BLOCK) {
       attrs_[attr_name] = Attribute::GetAttrValue(attr);
     }
   }
src/framework/program/op_desc.h (+2 −2)

@@ -20,6 +20,7 @@ limitations under the License. */

 #include "common/log.h"
 #include "common/type_define.h"
 #include "framework/framework.pb.h"
+#include "framework/framework.pb-c.h"
 #include "framework/paddle_mobile_object.h"

 namespace paddle_mobile {

@@ -30,8 +31,7 @@ class OpDesc : PaddleMobileObject {
   friend class ProgramOptimize;
   friend class FusionOpMatcher;
   friend class Node;
-  explicit OpDesc(const proto::OpDesc &desc);
+  explicit OpDesc(PaddleMobile__Framework__Proto__OpDesc *op_desc);
   OpDesc(const OpDesc &op_desc) : type_(op_desc.type_) {
     this->inputs_ = op_desc.inputs_;
     this->outputs_ = op_desc.outputs_;
src/framework/program/program_desc.cpp (+20 −4)

@@ -16,14 +16,14 @@ limitations under the License. */

 #include <vector>

 #include "program_desc.h"
+#include "framework/program/tensor_desc.h"

 namespace paddle_mobile {
 namespace framework {

-ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) {
-  for (auto &block_desc : desc.blocks()) {
-    // new framework::BlockDesc(block_desc)
-    blocks_.emplace_back(std::make_shared<BlockDesc>(block_desc));
+ProgramDesc::ProgramDesc(PaddleMobile__Framework__Proto__ProgramDesc *desc) {
+  for (int i = 0; i < desc->n_blocks; ++i) {
+    blocks_.emplace_back(std::make_shared<BlockDesc>(desc->blocks[i]));
   }
 }

@@ -55,6 +55,22 @@ void ProgramDesc::Description(std::string header) {
         LOG(kLOG_DEBUG3) << "argument - " << attr.second;
       }
     }
+
+    for (const auto &var_desc : block->Vars()) {
+      if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
+        LOG(kLOG_DEBUG1) << "var name: " << var_desc->Name();
+        const TensorDesc &tensor_desc = var_desc->Tensor_desc();
+        LOG(kLOG_DEBUG2) << "in var tensor desc dims size: "
+                         << tensor_desc.Dims().size();
+        for (int l = 0; l < tensor_desc.Dims().size(); ++l) {
+          LOG(kLOG_DEBUG3) << "var tensor desc dim " << l
+                           << " value: " << tensor_desc.Dims()[l];
+        }
+      }
+    }
   }
 #endif
 }
src/framework/program/program_desc.h (+4 −1)

@@ -17,6 +17,7 @@ limitations under the License. */

 #include <vector>

 #include "common/types.h"
+#include "framework/framework.pb-c.h"
 #include "framework/paddle_mobile_object.h"
 #include "framework/program/block_desc.h"

@@ -27,8 +28,10 @@ class ProgramDesc : PaddleMobileObject {
  public:
   friend class Node;
   friend class ProgramOptimize;
-  explicit ProgramDesc(const proto::ProgramDesc &desc);
+  explicit ProgramDesc(PaddleMobile__Framework__Proto__ProgramDesc *desc);
   std::shared_ptr<BlockDesc> Block(size_t idx);
   const std::vector<std::shared_ptr<BlockDesc>> &Blocks() { return blocks_; }
   ProgramDesc(const ProgramDesc &program_desc) {
     for (auto &block : program_desc.blocks_) {
src/framework/program/tensor_desc.cpp (new file, mode 100644, +5 −0)

//
//  Created by liuRuiLong on 2018/5/26.
//

#include "tensor_desc.h"
src/framework/program/tensor_desc.h (new file, mode 100644, +75 −0)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "framework/framework.pb-c.h"

namespace paddle_mobile {
namespace framework {

enum VarType_Type {
  VARTYPE_TYPE_BOOL = 0,
  VARTYPE_TYPE_INT16 = 1,
  VARTYPE_TYPE_INT32 = 2,
  VARTYPE_TYPE_INT64 = 3,
  VARTYPE_TYPE_FP16 = 4,
  VARTYPE_TYPE_FP32 = 5,
  VARTYPE_TYPE_FP64 = 6,
  VARTYPE_TYPE_LOD_TENSOR = 7,
  VARTYPE_TYPE_SELECTED_ROWS = 8,
  VARTYPE_TYPE_FEED_MINIBATCH = 9,
  VARTYPE_TYPE_FETCH_LIST = 10,
  VARTYPE_TYPE_STEP_SCOPES = 11,
  VARTYPE_TYPE_STEP_LOD_RANK_TABLE = 12,
  VARTYPE_TYPE_STEP_LOD_TENSOR_ARRAY = 13,
  VARTYPE_TYPE_STEP_PLACE_LIST = 14,
  VARTYPE_TYPE_READER = 15,
  VARTYPE_TYPE_CHANNEL = 16,
  VARTYPE_TYPE_RAW = 17,
  VARTYPE_TYPE_TUPLE = 18
};

class TensorDesc {
 public:
  TensorDesc() = default;
  TensorDesc(const TensorDesc &desc) {
    this->dims_ = desc.dims_;
    this->data_type_ = desc.data_type_;
  }

  TensorDesc(PaddleMobile__Framework__Proto__VarType__TensorDesc *desc) {
    for (int i = 0; i < desc->n_dims; ++i) {
      int64_t d = desc->dims[i];
      dims_.emplace_back(d);
    }
    data_type_ = (VarType_Type)desc->data_type;
  }

  std::vector<int64_t> Dims() const { return dims_; };
  VarType_Type DataType() const { return data_type_; }

 private:
  std::vector<int64_t> dims_;
  VarType_Type data_type_;
};

}
}
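TensorDesc now carries the dims and element type that io.cpp (further down) uses to size tensor buffers. A hypothetical helper, shown only to illustrate how Dims() and DataType() combine with the same byte-size table LoadVar uses; SizeOfTensor is not part of the commit:

// Hypothetical helper (not in the commit): byte size of a tensor described by
// the new TensorDesc, using the type-size mapping from LoadVar in io.cpp.
#include <cstddef>
#include "framework/program/tensor_desc.h"

namespace paddle_mobile {
namespace framework {

inline size_t SizeOfTensor(const TensorDesc &desc) {
  size_t numel = 1;
  for (int64_t d : desc.Dims()) {
    numel *= static_cast<size_t>(d);  // product of all dims
  }
  size_t type_size = 0;
  switch (desc.DataType()) {
    case VARTYPE_TYPE_FP16:  type_size = 2; break;
    case VARTYPE_TYPE_FP32:  type_size = 4; break;
    case VARTYPE_TYPE_FP64:  type_size = 8; break;
    case VARTYPE_TYPE_INT32: type_size = 4; break;
    case VARTYPE_TYPE_INT64: type_size = 8; break;
    case VARTYPE_TYPE_BOOL:  type_size = 1; break;
    default: break;
  }
  return numel * type_size;
}

}  // namespace framework
}  // namespace paddle_mobile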
src/framework/program/var_desc.cpp (+0 −1)

@@ -18,7 +18,6 @@ namespace paddle_mobile {

 namespace framework {

-VarDesc::VarDesc(const proto::VarDesc &desc) : desc_(desc) {}

 }  // namespace framework
 }  // namespace paddle_mobile
src/framework/program/var_desc.h (+110 −41)

@@ -15,71 +15,140 @@ limitations under the License. */

 #pragma once

-#include "framework/framework.pb.h"
+#include "framework/framework.pb-c.h"
+#include "framework/program/tensor_desc.h"
 #include "framework/paddle_mobile_object.h"

 namespace paddle_mobile {
 namespace framework {

The old protobuf-backed VarDesc is removed:

-class VarDesc {
- public:
-  VarDesc(const proto::VarDesc &desc);
-  VarDesc(const VarDesc &var_desc) : desc_(var_desc.desc_) {}
-
-  std::string Name() const { return desc_.name(); }
-
-  proto::VarType::Type GetType() const { return desc_.type().type(); }
-
-  bool Persistable() const { return desc_.persistable(); }
-
-  const proto::VarType::ChannelDesc &channel_desc() const {
-    switch (desc_.type().type()) {
-      case proto::VarType::CHANNEL:
-        return desc_.type().channel();
-      default:
-        break;
-    }
-  }
-
-  const proto::VarType::TensorDesc &tensor_desc() const {
-    switch (desc_.type().type()) {
-      case proto::VarType::SELECTED_ROWS:
-        return desc_.type().selected_rows();
-      case proto::VarType::LOD_TENSOR:
-        return desc_.type().lod_tensor().tensor();
-      case proto::VarType::LOD_TENSOR_ARRAY:
-        return desc_.type().tensor_array().tensor();
-        break;
-      default:
-        break;
-    }
-  }
-
-  proto::VarType::Type GetDataType() const {
-    switch (desc_.type().type()) {
-      case proto::VarType::CHANNEL:
-        return channel_desc().data_type();
-        break;
-      default:
-        return tensor_desc().data_type();
-    }
-  }
-
-  template <typename T>
-  std::vector<T> RepeatedToVector(
-      const google::protobuf::RepeatedField<T> &repeated_field) const {
-    std::vector<T> ret;
-    ret.reserve(repeated_field.size());
-    std::copy(repeated_field.begin(), repeated_field.end(),
-              std::back_inserter(ret));
-    return ret;
-  }
-
-  std::vector<int64_t> GetShape() const {
-    return this->RepeatedToVector(tensor_desc().dims());
-  }

It is replaced by a protobuf-c-backed VarDesc that copies the fields out eagerly:

+/*
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__BOOL = 0,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__INT16 = 1,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__INT32 = 2,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__INT64 = 3,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__FP16 = 4,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__FP32 = 5,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__FP64 = 6,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__LOD_TENSOR = 7,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__SELECTED_ROWS = 8,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__FEED_MINIBATCH = 9,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__FETCH_LIST = 10,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__STEP_SCOPES = 11,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__LOD_RANK_TABLE = 12,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__LOD_TENSOR_ARRAY = 13,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__PLACE_LIST = 14,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__READER = 15,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__CHANNEL = 16,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__RAW = 17,
+   PADDLE_MOBILE__FRAMEWORK__PROTO__VAR_TYPE__TYPE__TUPLE = 18
+*/
+class VarDesc {
+ public:
+  VarDesc(const VarDesc &var_desc) {
+    this->data_type_ = var_desc.data_type_;
+    this->name_ = var_desc.name_;
+    this->persistable_ = var_desc.persistable_;
+    this->tensor_desc_ = var_desc.tensor_desc_;
+    this->type_ = var_desc.type_;
+    /*
+     * std::string name_;
+       bool persistable_;
+       TensorDesc tensor_desc_;
+       VarType_Type type_;
+       VarType_Type data_type_;
+     * */
+  }
+  VarDesc(PaddleMobile__Framework__Proto__VarDesc *desc) {
+    type_ = (VarType_Type)desc->type->type;
+    name_ = std::string(desc->name);
+    persistable_ = (bool)desc->persistable;
+    switch (type_) {
+      case VARTYPE_TYPE_SELECTED_ROWS:
+        tensor_desc_ = TensorDesc(desc->type->selected_rows);
+        break;
+      case VARTYPE_TYPE_LOD_TENSOR:
+        tensor_desc_ = TensorDesc(desc->type->lod_tensor->tensor);
+        break;
+      case VARTYPE_TYPE_STEP_LOD_TENSOR_ARRAY:
+        desc->type->tensor_array->tensor->data_type;
+        tensor_desc_ = TensorDesc(desc->type->tensor_array->tensor);
+        break;
+      default:
+        break;
+    }
+    switch (type_) {
+      case VARTYPE_TYPE_CHANNEL:
+        data_type_ = (VarType_Type)desc->type->channel->data_type;
+        break;
+      default:
+        data_type_ = tensor_desc_.DataType();
+        break;
+    }
+  }
+
+  std::string Name() const { return name_; }
+  VarType_Type Type() const { return type_; }
+  bool Persistable() const { return persistable_; }
+  const TensorDesc &Tensor_desc() const { return tensor_desc_; }
+
+  //  const proto::VarType::ChannelDesc &channel_desc() const {
+  //    switch (desc_.type().type()) {
+  //      case proto::VarType::CHANNEL:
+  //        return desc_.type().channel();
+  //      default:
+  //        break;
+  //    }
+  //  }
+  //  proto::VarType::Type GetDataType() const {
+  //    switch (desc_.type().type()) {
+  //      case proto::VarType::CHANNEL:
+  //        return channel_desc().data_type();
+  //        break;
+  //      default:
+  //        return tensor_desc().data_type();
+  //    }
+  //  }
+  //  template <typename T>
+  //  std::vector<T> RepeatedToVector(
+  //      const google::protobuf::RepeatedField<T> &repeated_field) const {
+  //    std::vector<T> ret;
+  //    ret.reserve(repeated_field.size());
+  //    std::copy(repeated_field.begin(), repeated_field.end(),
+  //              std::back_inserter(ret));
+  //    return ret;
+  //  }
+  //  std::vector<int64_t> GetShape() const {
+  //    return this->RepeatedToVector(tensor_desc().dims());
+  //  }

  private:
-  proto::VarDesc desc_;
+  std::string name_;
+  bool persistable_;
+  TensorDesc tensor_desc_;
+  VarType_Type type_;
+  VarType_Type data_type_;
 };

 }  // namespace framework
src/io.cpp (+75 −182)

@@ -16,14 +16,16 @@ limitations under the License. */

 #include <fstream>
 #include <vector>

-#include "common/enforce.h"
 #include "common/log.h"
-#include "framework/framework.pb.h"
-#include "framework/lod_tensor.h"
-#include "framework/operator.h"
-#include "framework/program/program_desc.h"
+#include "common/enforce.h"
 #include "framework/scope.h"
 #include "framework/tensor.h"
+#include "framework/operator.h"
+#include "framework/lod_tensor.h"
+#include "framework/framework.pb.h"
+#include "framework/framework.pb-c.h"
+#include "framework/program/var_desc.h"
+#include "framework/program/program_desc.h"

 namespace paddle_mobile {

@@ -39,12 +41,37 @@ void ReadBinaryFile(const std::string &filename, std::string *contents) {
   fin.close();
 }

+static size_t ReadBuffer(const char *file_name, uint8_t **out) {
+  printf("%s \n", file_name);
+  FILE *fp;
+  fp = fopen(file_name, "rb");
+  PADDLE_MOBILE_ENFORCE(fp != NULL, "open failed !");
+
+  fseek(fp, 0, SEEK_END);
+  size_t size = ftell(fp);
+  rewind(fp);
+
+  DLOG << "model size: " << size;
+
+  *out = (uint8_t *)malloc(size);
+
+  size_t cur_len = 0;
+  size_t nread;
+  while ((nread = fread(*out + cur_len, 1, size - cur_len, fp)) != 0) {
+    cur_len += nread;
+  }
+  fclose(fp);
+  return cur_len;
+}
+
 template <typename Dtype, Precision P>
-void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
+void Loader<Dtype, P>::LoadVar(framework::Variable *variable,
+                               const framework::VarDesc &var_desc,
                                const std::string &file_path) {
+  auto tensor = variable->GetMutable<framework::LoDTensor>();
   std::ifstream is(file_path);
   PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
                         file_path.c_str());
   std::fpos<mbstate_t> pos;
   pos = is.tellg();  // save current position
   is.seekg(0, std::ios::end);

@@ -81,39 +108,44 @@ void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
   std::unique_ptr<char[]> buf(new char[size]);
   is.read(reinterpret_cast<char *>(buf.get()), size);

-  framework::proto::VarType::TensorDesc desc;
-  desc.ParseFromArray(buf.get(), size);
+  const framework::TensorDesc &desc = var_desc.Tensor_desc();
+
+  //  framework::TensorDesc &tensor_desc = variable->
+  //  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
+  //  uint8_t *proto_buf = NULL;
+  //  size_t read_size = ReadBuffer(file_path.c_str(), &proto_buf);
+  //  c_program = paddle_mobile__framework__proto__program_desc__unpack(NULL, read_size, buf);
+  //  paddle_mobile__framework__proto__var_type__tensor_desc__init()

   int memory_size = 1;
-  for (auto l : desc.dims()) {
+  for (auto l : desc.Dims()) {
     memory_size *= l;
   }

-  std::vector<int64_t> dims;
-  dims.reserve(static_cast<size_t>(desc.dims().size()));
-  std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
-  tensor->Resize(framework::make_ddim(dims));
+  tensor->Resize(framework::make_ddim(desc.Dims()));

   void *memory = tensor;
   int type_size = 0;
-  switch (desc.data_type()) {
-    case framework::proto::VarType::FP16:
+  switch (desc.DataType()) {
+    case framework::VARTYPE_TYPE_FP16:
       type_size = 2;
       break;
-    case framework::proto::VarType::FP32:
+    case framework::VARTYPE_TYPE_FP32:
       type_size = 4;
       memory = tensor->mutable_data<float>();
       break;
-    case framework::proto::VarType::FP64:
+    case framework::VARTYPE_TYPE_FP64:
       type_size = 8;
       break;
-    case framework::proto::VarType::INT32:
+    case framework::VARTYPE_TYPE_INT32:
       type_size = 4;
       break;
-    case framework::proto::VarType::INT64:
+    case framework::VARTYPE_TYPE_INT64:
       type_size = 8;
       break;
-    case framework::proto::VarType::BOOL:
+    case framework::VARTYPE_TYPE_BOOL:
       type_size = 1;
       break;
     default:

@@ -128,13 +160,20 @@ template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
     const std::string &dirname) {
   std::string model_filename = dirname + "/__model__";
-  std::string program_desc_str;
-  ReadBinaryFile(model_filename, &program_desc_str);
-  framework::proto::ProgramDesc program_desc_proto;
-  program_desc_proto.ParseFromString(program_desc_str);
+  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
+  uint8_t *buf = NULL;
+  size_t read_size = ReadBuffer(model_filename.c_str(), &buf);
+
+  PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");
+
+  c_program = paddle_mobile__framework__proto__program_desc__unpack(
+      NULL, read_size, buf);
+
+  PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
+
+  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
+
   std::shared_ptr<framework::ProgramDesc> originProgramDesc =
-      std::make_shared<framework::ProgramDesc>(program_desc_proto);
+      std::make_shared<framework::ProgramDesc>(c_program);

   framework::Program<Dtype, P> program;
   program.model_path = dirname;

@@ -148,172 +187,26 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
   for (const auto &block : originProgramDesc->Blocks()) {
     for (int i = 0; i < block->Vars().size(); ++i) {
       std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
       //      DLOG << "var name-- " << var_desc->Name();
       auto var = scope->Var(var_desc->Name());
-      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+      if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
         if (var_desc->Persistable() &&
-            var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
-            var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
-          auto tensor = var->GetMutable<framework::LoDTensor>();
-          // to load
-          LoadVar(tensor, dirname + "/" + var_desc->Name());
+            var_desc->Type() != framework::VARTYPE_TYPE_FEED_MINIBATCH &&
+            var_desc->Type() != framework::VARTYPE_TYPE_FETCH_LIST) {
+          //          DLOG << "to load var ";
+          LoadVar(var, *var_desc, dirname + "/" + var_desc->Name());
         }
       } else {
         // TODO(codeWorm): some.
       }
     }
   }
 #ifdef PADDLE_MOBILE_DEBUG
-  for (const auto &block : program_desc_proto.blocks()) {
-    LOG(kLOG_DEBUG) << "block: " << block.idx();
-    for (int j = 0; j < block.ops().size(); ++j) {
-      framework::proto::OpDesc op = block.ops()[j];
-      LOG(kLOG_DEBUG1) << "op: " << op.type();
-      for (int m = 0; m < op.inputs_size(); ++m) {
-        const framework::proto::OpDesc::Var &var = op.inputs(m);
-        LOG(kLOG_DEBUG2) << "input parameter: " << var.parameter();
-        for (const auto &n : var.arguments()) {
-          LOG(kLOG_DEBUG3) << "argument - " << n;
-        }
-      }
-      for (int y = 0; y < op.outputs_size(); ++y) {
-        const framework::proto::OpDesc::Var &var = op.outputs(y);
-        LOG(kLOG_DEBUG2) << "out parameter: " << var.parameter();
-        for (const auto &z : var.arguments()) {
-          LOG(kLOG_DEBUG3) << "argument - " << z;
-        }
-      }
-      for (const auto &attr : op.attrs()) {
-        LOG(kLOG_DEBUG2) << "attr name: " << attr.name();
-        switch (attr.type()) {
-          case framework::proto::AttrType::BOOLEAN:
-            LOG(kLOG_DEBUG3) << "boolen: " << attr.b();
-            break;
-          case framework::proto::AttrType::INT:
-            LOG(kLOG_DEBUG3) << "int: " << attr.i();
-            break;
-          case framework::proto::AttrType::FLOAT:
-            LOG(kLOG_DEBUG3) << "float: " << attr.f();
-          case framework::proto::AttrType::STRING:
-            LOG(kLOG_DEBUG3) << "string: " << attr.s();
-          case framework::proto::AttrType::BOOLEANS:
-            for (int y = 0; y < attr.bools_size(); ++y) {
-              LOG(kLOG_DEBUG3) << "bools: " << attr.bools(y);
-            }
-          case framework::proto::AttrType::LONG:
-            LOG(kLOG_DEBUG3) << "long: " << attr.l();
-          case framework::proto::AttrType::FLOATS:
-            for (int y = 0; y < attr.floats_size(); ++y) {
-              LOG(kLOG_DEBUG3) << "floats: " << attr.floats(y);
-            }
-          case framework::proto::AttrType::INTS:
-            for (int y = 0; y < attr.ints_size(); ++y) {
-              LOG(kLOG_DEBUG3) << "ints: " << attr.ints(y);
-            }
-          case framework::proto::AttrType::STRINGS:
-            for (int y = 0; y < attr.strings_size(); ++y) {
-              LOG(kLOG_DEBUG3) << "strings: " << attr.strings(y);
-            }
-          case framework::proto::BLOCK:
-            break;
-        }
-      }
-    }
-    for (const auto &var : block.vars()) {
-      if (var.type().type() == framework::proto::VarType::LOD_TENSOR) {
-        LOG(kLOG_DEBUG1) << "var name: " << var.name();
-        const framework::proto::VarType::TensorDesc &tensor_desc =
-            var.type().lod_tensor().tensor();
-        LOG(kLOG_DEBUG2) << "in var tensor desc dims size: "
-                         << tensor_desc.dims().size();
-        for (int l = 0; l < tensor_desc.dims().size(); ++l) {
-          LOG(kLOG_DEBUG3) << "var tensor desc dim " << l
-                           << " value: " << tensor_desc.dims()[l];
-        }
-      }
-      if (var.persistable() &&
-          var.type().type() != framework::proto::VarType::FEED_MINIBATCH &&
-          var.type().type() != framework::proto::VarType::FETCH_LIST) {
-        std::string file_path = dirname + "/" + var.name();
-        std::ifstream is(file_path);
-        PADDLE_MOBILE_ENFORCE(is.is_open(), "open file: %s failed",
-                              file_path.c_str());
-        std::fpos<mbstate_t> pos;
-        pos = is.tellg();  // save current position
-        is.seekg(0, std::ios::end);
-        is.seekg(pos);  // restore saved position
-        // 1. version
-        uint32_t version;
-        is.read(reinterpret_cast<char *>(&version), sizeof(version));
-        // 2 Lod information
-        uint64_t lod_level;
-        is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
-        for (uint64_t i = 0; i < lod_level; ++i) {
-          uint64_t size;
-          is.read(reinterpret_cast<char *>(&size), sizeof(size));
-          std::vector<size_t> tmp(size / sizeof(size_t));
-          is.read(reinterpret_cast<char *>(tmp.data()),
-                  static_cast<std::streamsize>(size));
-          for (int j = 0; j < tmp.size(); ++j) {
-          }
-        }
-        is.read(reinterpret_cast<char *>(&version), sizeof(version));
-        int32_t size;
-        is.read(reinterpret_cast<char *>(&size), sizeof(size));
-        std::unique_ptr<char[]> buf(new char[size]);
-        is.read(reinterpret_cast<char *>(buf.get()), size);
-        framework::proto::VarType::TensorDesc desc;
-        desc.ParseFromArray(buf.get(), size);
-        int memory_size = 1;
-        for (long long l : desc.dims()) {
-          memory_size *= l;
-        }
-        int type_size = 0;
-        switch (desc.data_type()) {
-          case framework::proto::VarType::FP16:
-            type_size = 2;
-            break;
-          case framework::proto::VarType::FP32:
-            type_size = 4;
-            break;
-          case framework::proto::VarType::FP64:
-            type_size = 8;
-            break;
-          case framework::proto::VarType::INT32:
-            type_size = 4;
-            break;
-          case framework::proto::VarType::INT64:
-            type_size = 8;
-            break;
-          case framework::proto::VarType::BOOL:
-            type_size = 1;
-            break;
-          default:
-            break;
-        }
-        void *memory = malloc(memory_size * type_size);
-        is.read(static_cast<char *>(memory), memory_size * type_size);
-        is.close();
-      } else {
-        // TODO
-      }
-    }
-  }
+  originProgramDesc->Description("program: ");
 #endif
+
+  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
+
   return program;
 }

@@ -440,7 +333,7 @@ void Executor<Dtype, P>::InitMemory() {
       auto tensor = var->template GetMutable<framework::LoDTensor>();
       LoadMemory(tensor, program_.model_path + "/" + var_desc->Name());
     } else {
-      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+      if (var_desc->Type() == framework::VARTYPE_TYPE_LOD_TENSOR) {
        auto tensor = var->template GetMutable<framework::Tensor>();
        tensor->template mutable_data<Ptype>();
       }
src/io.h (+1 −1)

@@ -33,7 +33,7 @@ class Loader : PaddleMobileObject {
   const framework::Program<Dtype, P> Load(const std::string &dirname);

  private:
-  void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
+  void LoadVar(framework::Variable *variable, const framework::VarDesc &var_desc, const std::string &file_path);
 };

 template <typename Dtype, Precision P = Precision::FP32>
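With this signature change the loader is driven entirely from the protobuf-c ProgramDesc: Load reads __model__ via ReadBuffer and program_desc__unpack, and LoadVar receives both the Variable and its VarDesc so it no longer re-parses a per-variable tensor-desc proto. A hedged usage sketch; the CPU device tag and model path are illustrative assumptions, only Loader<Dtype, P>::Load(dirname) and the Precision::FP32 default come from the diff:

// Hypothetical caller of the loader after this commit.
#include "io.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;  // "CPU" device tag assumed
  // Expects ./mobilenet/__model__ plus one file per persistable LOD_TENSOR variable.
  auto program = loader.Load("./mobilenet");
  return 0;
}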
test/executor_for_test.h (+1 −0)

@@ -55,6 +55,7 @@ class Executor4Test : public Executor<DeviceType> {
   for (std::shared_ptr<BlockDesc> block_desc : blocks) {
     std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
     for (std::shared_ptr<OpDesc> op : ops) {
       if (op->Type() == op_type) {
         std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
             op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),