PaddlePaddle / Paddle-Lite

Commit 79ba1760
Authored Jun 04, 2018 by liuruilong
Parent: dbeb1544

add conv add fusion op
Showing 15 changed files with 158 additions and 31 deletions (+158 −31)
src/common/types.h                                              +4  −1
src/framework/operator.h                                        +2  −2
src/framework/program/program-optimize/node.cpp                 +8  −4
src/framework/program/program-optimize/node.h                   +3  −2
src/framework/program/program-optimize/program_optimize.cpp    +28  −7
src/io.cpp                                                      +2  −0
src/io.h                                                        +1  −1
src/operators/fusion_conv_add.cpp                              +29  −0
src/operators/fusion_conv_add.h                                +66  −0
src/operators/fusion_conv_add_relu_op.h                         +2  −2
src/operators/fusion_fc_op.h                                    +3  −3
test/framework/test_load.cpp                                    +2  −2
test/framework/test_optimize.cpp                                +3  −3
test/net/test_mobilenet+ssd.cpp                                 +2  −2
test/test_helper.h                                              +3  −2
src/common/types.h

@@ -80,6 +80,7 @@ static const std::string G_OP_TYPE_ELEMENTWISE_ADD = "elementwise_add";
 static const std::string G_OP_TYPE_FUSION_CONV_ADD_RELU =
     "fusion_conv_add_relu";
 static const std::string G_OP_TYPE_FC = "fc";
+static const std::string G_OP_TYPE_CONV_ADD = "conv_add";
 static const std::string G_OP_TYPE_LRN = "lrn";
 static const std::string G_OP_TYPE_MUL = "mul";
 static const std::string G_OP_TYPE_MULTICLASS_NMS = "multiclass_nms";

@@ -115,5 +116,7 @@ static std::unordered_map<
          {{"PriorBox", "PriorBoxVar", "TargetBox"}, {"OutputBox"}}},
         {G_OP_TYPE_PRIOR_BOX, {{"Image", "Input"}, {"Boxes", "Variances"}}},
         {G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}},
-        {G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}}};
+        {G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}},
+        {G_OP_TYPE_DEPTHWISE_CONV, {{"Input"}, {"Output"}}}
+};

 }  // namespace paddle_mobile
src/framework/operator.h

@@ -145,8 +145,8 @@ class FusionOpMatcher : PaddleMobileObject {
   virtual std::string Type() = 0;

-  virtual void FolderNodes(Node *node) {
-    node->Folder(node_.Depth(), Type(), {});
+  virtual void FolderNodes(Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
+    node->Folder(node_.Depth(), Type(), {}, removed_nodes);
   }

   virtual Node &BeginNode() { return node_; }
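To make the new parameter concrete, here is a minimal sketch of the intended call pattern. It paraphrases the program_optimize.cpp hunk further down (which uses an index-based loop rather than a range-for), so treat it as illustrative rather than as code from this commit:

  // Sketch only: the matcher folds the matched subgraph into a single node and
  // reports every node it absorbed through removed_nodes ...
  std::vector<std::shared_ptr<framework::Node>> removed_nodes;
  matcher->FolderNodes(match_node.get(), &removed_nodes);
  // ... so the fusion pass can drop the absorbed nodes from its working list
  // and they are not emitted into the optimized block a second time.
  for (auto &removed_node : removed_nodes) {
    nodes.erase(std::find(nodes.begin(), nodes.end(), removed_node));
  }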
src/framework/program/program-optimize/node.cpp

@@ -236,12 +236,13 @@ uint Node::Depth(uint begin) {
 Node &Node::Folder(
     uint size, std::string type,
-    std::map<std::string, std::pair<std::string, std::string>> change) {
+    std::map<std::string, std::pair<std::string, std::string>> change,
+    std::vector<std::shared_ptr<Node>> *removed_nodes) {
   std::shared_ptr<framework::OpDesc> op_desc =
       std::make_shared<framework::OpDesc>();
   op_desc->inputs_ = this->op_desc_->inputs_;
   std::vector<std::shared_ptr<Node>> outputs;
-  this->Folder(op_desc, &outputs, size - 1, &change, this);
+  this->Folder(op_desc, &outputs, size - 1, &change, this, removed_nodes);
   this->outputs_ = outputs;
   this->type_ = type;
   this->op_desc_ = op_desc;

@@ -253,7 +254,8 @@ void Node::Folder(
     std::shared_ptr<framework::OpDesc> op_desc,
     std::vector<std::shared_ptr<Node>> *outputs, uint index,
     std::map<std::string, std::pair<std::string, std::string>> *change,
-    Node *begin_node) {
+    Node *begin_node,
+    std::vector<std::shared_ptr<Node>> *removed_nodes) {
   if (change->find(this->type_) != change->end()) {
     auto change_pair = (*change)[this->type_];
     op_desc->GetInputs()[change_pair.second] =

@@ -266,7 +268,8 @@ void Node::Folder(
   if (index > 0) {
     --index;
     for (auto output : outputs_) {
-      output->Folder(op_desc, outputs, index, change, begin_node);
+      removed_nodes->push_back(output);
+      output->Folder(op_desc, outputs, index, change, begin_node, removed_nodes);
     }
   } else {
     for (auto &op_output : this->op_desc_->outputs_) {

@@ -279,6 +282,7 @@ void Node::Folder(
       if (iter != output->inputs_.end()) {
         output->inputs_.erase(iter);
       }
       output->inputs_.push_back(begin_node);
       outputs->push_back(output);
src/framework/program/program-optimize/node.h

@@ -43,7 +43,7 @@ class Node : PaddleMobileObject {
   uint Depth(uint begin = 0);
   Node &Folder(
       uint size, std::string type,
-      std::map<std::string, std::pair<std::string, std::string>> change_map);
+      std::map<std::string, std::pair<std::string, std::string>> change_map,
+      std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs(uint size);
   std::vector<std::shared_ptr<framework::OpDesc>> OpDescs();
   std::shared_ptr<framework::OpDesc> OpDescOfNode() { return op_desc_; }

@@ -63,7 +63,8 @@ class Node : PaddleMobileObject {
               std::shared_ptr<framework::OpDesc> op_desc,
               std::vector<std::shared_ptr<Node>> *outputs, uint index,
               std::map<std::string, std::pair<std::string, std::string>> *change,
-              Node *begin_node);
+              Node *begin_node,
+              std::vector<std::shared_ptr<Node>> *removed_nodes);
   std::shared_ptr<framework::OpDesc> op_desc_;
   std::string ToString(std::string blank, const Node *node) const;
   std::vector<std::shared_ptr<Node>> outputs_;
src/framework/program/program-optimize/program_optimize.cpp

@@ -31,6 +31,9 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
     std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>>
         type_map;

+    std::vector<std::shared_ptr<Node>> nodes;
+
     std::shared_ptr<Node> begin_node;
     auto block = optimize_program->Block(i);
     //  DLOG << " ops size: " << block->Ops().size();

@@ -38,11 +41,12 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
       auto op = block->Ops()[j];
       auto op_type = op->Type();
       if (op_input_output_key.find(op->Type()) == op_input_output_key.end()) {
-        LOG(kLOG_ERROR) << " return null ";
+        LOG(kLOG_ERROR) << " has not support op return null "
+                        << " op type: " << op->Type();
         return nullptr;
       }

       std::shared_ptr<Node> node = std::make_shared<Node>(op);
+      nodes.push_back(node);
       //
       type_map[op->Type()].push_back(node);

@@ -87,21 +91,29 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
           //  DLOG << " match success " << " fusion node: \n" <<
           //  matcher->BeginNode() << "\nsub node: \n" << *sub_node;
           //  DLOG << "match node\n"<< *match_node;
-          matcher->FolderNodes(match_node.get());
-          //  DLOG << " after match node\n"<< *match_node;
-          //  match_node->Description();
-          //  DLOG << "begin node: \n" << *begin_node;
+          std::vector<std::shared_ptr<Node>> removed_nodes;
+          matcher->FolderNodes(match_node.get(), &removed_nodes);
+          for (int j = 0; j < removed_nodes.size(); ++j) {
+            auto removed_node = removed_nodes[j];
+            auto removed_ite =
+                std::find(nodes.begin(), nodes.end(), removed_node);
+            nodes.erase(removed_ite);
+          }
         }
       }
     }

     //  DLOG << "node: \n" << *begin_node;
     std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
     //  bool can_splite = begin_node->CanSplit({G_OP_TYPE_CONV,
     //  G_OP_TYPE_BATCHNORM, G_OP_TYPE_DEPTHWISE_CONV});
-    GenerateOps(&op_descs, begin_node.get());
+    for (int m = 0; m < nodes.size(); ++m) {
+      auto &node = nodes[m];
+      op_descs.push_back(node->op_desc_);
+    }
+    //  GenerateOps(&op_descs, begin_node.get());
     block->ops_ = op_descs;
   }

@@ -116,8 +128,17 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
 void ProgramOptimize::GenerateOps(
     std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
     Node *current_node) {
   if (current_node->inputs_.size() > 1 &&
       input_node != current_node->inputs_.back()) {
+    DLOG << " current type " << current_node->type_;
+    DLOG << " inputs size of current node > 0 ";
+    for (int i = 0; i < current_node->inputs_.size(); ++i) {
+      DLOG << " input i: " << current_node->inputs_[i]->type_;
+    }
     return;
   } else if (current_node->inputs_.size() > 1 &&
              input_node == current_node->inputs_.back()) {
src/io.cpp

@@ -221,6 +221,8 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
     }
   }

+  //  originProgramDesc->Description("program: ");
+
   if (optimize) {
     framework::ProgramOptimize program_optimize;
     program.optimizeProgram =
src/io.h

@@ -32,7 +32,7 @@ template <typename Dtype, Precision P = Precision::FP32>
 class Loader : PaddleMobileObject {
  public:
   const framework::Program<Dtype, P> Load(const std::string &dirname,
-                                          bool optimize = true);
+                                          bool optimize = false);

  private:
   void LoadVar(framework::Variable *variable,
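Since the default for optimize flips from true to false here, callers now have to opt in to the fusion pass explicitly. A hypothetical caller, modelled on the updated tests further down (the model path is one of the constants from test/test_helper.h), might look like this:

#include "io.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  // Pass true explicitly: with this commit, Load() no longer optimizes by default.
  auto program = loader.Load("../models/mobilenet+ssd", true);
  return 0;
}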
src/operators/fusion_conv_add.cpp (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "operators/fusion_conv_add.h"

namespace paddle_mobile {
namespace operators {

template <typename Dtype, typename T>
void FushionConvAddOp<Dtype, T>::InferShape() const {}

template class FushionConvAddOp<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile

namespace ops = paddle_mobile::operators;
USE_OP(conv_add);
REGISTER_OPERATOR(conv_add, ops::FushionConvAddOp);
src/operators/fusion_conv_add.h (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"

namespace paddle_mobile {
namespace operators {
using std::string;
using std::vector;

class FusionConvAddMatcher : public framework::FusionOpMatcher {
 public:
  FusionConvAddMatcher() {
    node_ = framework::Node(G_OP_TYPE_CONV);
    node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD);
  }

  void FolderNodes(framework::Node *node,
                   std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
    vector<std::shared_ptr<framework::OpDesc>> origin_descs =
        node->OpDescs(node_.Depth());
    node->Folder(node_.Depth(), Type(),
                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Y"}}}, removed_nodes);
  }

  std::string Type() { return G_OP_TYPE_CONV_ADD; }
};

template <typename DeviceType, typename T>
class FushionConvAddOp : public framework::OperatorWithKernel<DeviceType> {
 public:
  FushionConvAddOp(const string &type, const VariableNameMap &inputs,
                   const VariableNameMap &outputs,
                   const framework::AttributeMap attrs,
                   std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
                                                  scope) {}

  void RunImpl() const {}

  using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
  void InferShape() const override;

 protected:
  //  FushionFcParam param_;
};

// static framework::FusionOpRegistrar fc_registrar(new FusionConvAddMatcher());

}  // namespace operators
}  // namespace paddle_mobile
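Note that the registrar at the bottom of this new header is left commented out, so the conv_add matcher is not yet registered with the fusion framework by this commit. Judging from the fc_registrar pattern in fusion_fc_op.h, enabling it would presumably be a single static registrar; the variable name below is illustrative, not part of this commit:

// Illustrative only -- not added by this commit.
static framework::FusionOpRegistrar conv_add_registrar(new FusionConvAddMatcher());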
src/operators/fusion_conv_add_relu_op.h

@@ -28,11 +28,11 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
             std::make_shared<framework::Node>(G_OP_TYPE_RELU);
   }

-  void FolderNodes(framework::Node *node) {
+  void FolderNodes(framework::Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     std::vector<std::shared_ptr<framework::OpDesc>> origin_descs =
         node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
-                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
+                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}}, removed_nodes);
   }

   std::string Type() { return G_OP_TYPE_FUSION_CONV_ADD_RELU; }
 };
src/operators/fusion_fc_op.h

@@ -32,11 +32,11 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
     node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD);
   }

-  void FolderNodes(framework::Node *node) {
+  void FolderNodes(framework::Node *node, std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
     vector<std::shared_ptr<framework::OpDesc>> origin_descs =
         node->OpDescs(node_.Depth());
     node->Folder(node_.Depth(), Type(),
-                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
+                 {{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}}, removed_nodes);
   }

   std::string Type() { return G_OP_TYPE_FC; }

@@ -65,7 +65,7 @@ class FushionFcOp : public framework::OperatorWithKernel<DeviceType> {
   FushionFcParam param_;
 };

-static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
+// static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());

 }  // namespace operators
 }  // namespace paddle_mobile
test/framework/test_load.cpp

@@ -19,7 +19,7 @@ int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   //  ../../../test/models/googlenet
   //  ../../../test/models/mobilenet
-  auto program = loader.Load(g_googlenet);
-  program.optimizeProgram->Description("program desc: ");
+  auto program = loader.Load(g_mobilenet_ssd, true);
+  program.originProgram->Description("program desc: ");
   return 0;
 }
test/framework/test_optimize.cpp

@@ -12,20 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "io.h"
 #include "../test_helper.h"
 #include "framework/program/program-optimize/node.h"
 #include "framework/program/program-optimize/program_optimize.h"
+#include "io.h"

 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   //  "../../../test/models/googlenet"
-  auto program = loader.Load(g_mobilenet_ssd);
+  auto program = loader.Load(g_mobilenet_ssd, true);
   paddle_mobile::framework::ProgramOptimize optimize;
   //  program.originProgram->Description("origin");
   auto optimize_program = optimize.FushionOptimize(program.originProgram);
   if (optimize_program != nullptr) {
-    optimize_program->Description("optimize");
+    //  optimize_program->Description("optimize");
   } else {
     LOG(paddle_mobile::kLOG_ERROR) << "optimize_program is null";
   }
test/net/test_mobilenet+ssd.cpp

@@ -19,10 +19,10 @@ limitations under the License. */
 int main() {
   paddle_mobile::Loader<paddle_mobile::CPU> loader;
   auto time1 = time();
-  auto program = loader.Load(g_mobilenet_ssd, false);
+  auto program = loader.Load(g_mobilenet_ssd, true);
   auto time2 = time();
   DLOG << "load cost :" << time_diff(time1, time1) << "ms";
-  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, false);
+  paddle_mobile::Executor<paddle_mobile::CPU> executor(program, 1, true);
   std::vector<int64_t> dims{1, 3, 300, 300};
   Tensor input_tensor;
test/test_helper.h

@@ -22,10 +22,11 @@ limitations under the License. */
 #include "framework/ddim.h"
 #include "framework/tensor.h"

-static const std::string g_googlenet = "../models/googlenet";
-static const std::string g_mobilenet = "../models/mobilenet";
 static const std::string g_mobilenet_ssd = "../models/mobilenet+ssd";
 static const std::string g_squeezenet = "../models/squeezenet";
+static const std::string g_googlenet = "../models/googlenet";
+static const std::string g_mobilenet = "../models/mobilenet";
+static const std::string g_resnet_50 = "../models/resnet_50";
 static const std::string g_resnet = "../models/resnet";
 static const std::string g_yolo = "../models/yolo";
 static const std::string g_test_image_1x3x224x224 =