Commit 089cc11d
Authored on Oct 09, 2017 by Yang Yang
Parent: c93d74aa

clean up && fix #4624
Showing 3 changed files with 68 additions and 104 deletions (+68 -104):

  paddle/framework/block_desc.cc     +6   -0
  paddle/framework/executor.cc       +13  -24
  paddle/framework/executor_test.cc  +49  -80
paddle/framework/block_desc.cc
```diff
@@ -74,6 +74,12 @@ void BlockDescBind::Sync() {
     for (auto& op_desc : ops_) {
       op_field.AddAllocated(op_desc->Proto());
     }
+    auto& var_field = *this->desc_->mutable_vars();
+    var_field.Clear();
+    var_field.Reserve(static_cast<int>(vars_.size()));
+    for (auto& var_desc : vars_) {
+      var_field.AddAllocated(var_desc.second->Proto());
+    }
     need_update_ = false;
   }
 }
```
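The added lines make Sync() flush the cached variable descriptors the same way it already flushes ops: clear the repeated field, reserve capacity, and re-attach each cached proto. A minimal sketch of this lazy-flush pattern with plain containers (illustrative names, not Paddle's API; protobuf's AddAllocated ownership transfer is simplified to non-owning pointers):

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Stand-in for a protobuf message; only the flattened view matters here.
struct VarProto {
  std::string name;
};

// Mutable wrapper, analogous to VarDescBind.
class VarBind {
 public:
  explicit VarBind(std::string name) : proto_{std::move(name)} {}
  VarProto* Proto() { return &proto_; }  // stable pointer into the wrapper
 private:
  VarProto proto_;
};

// Analogous to BlockDescBind: edits go through wrappers, and the flattened
// "desc" view is rebuilt lazily, like Sync() above.
class BlockBind {
 public:
  void NewVar(const std::string& name) {
    vars_[name] = std::make_unique<VarBind>(name);
    need_update_ = true;  // mark the flattened view stale
  }

  const std::vector<VarProto*>& Flattened() {
    if (need_update_) {
      flattened_.clear();
      flattened_.reserve(vars_.size());
      for (auto& kv : vars_) {
        flattened_.push_back(kv.second->Proto());  // like AddAllocated()
      }
      need_update_ = false;
    }
    return flattened_;
  }

 private:
  std::map<std::string, std::unique_ptr<VarBind>> vars_;
  std::vector<VarProto*> flattened_;
  bool need_update_ = false;
};

int main() {
  BlockBind block;
  block.NewVar("w1");
  block.NewVar("w2");
  for (VarProto* v : block.Flattened()) std::cout << v->name << "\n";
}
```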
paddle/framework/executor.cc
```diff
@@ -54,39 +54,33 @@ Executor::~Executor() {
 void Executor::Run(const ProgramDesc& pdesc, Scope* scope) {
   // TODO(tonyyang-svail):
-  // - only runs the first block
-  // - only runs on the first device
-  // - test on gpu
+  // - only runs the first block (i.e. no RNN support)
+  // - only runs on the first device (i.e. no interdevice communication)
   auto& block = pdesc.blocks(0);
   auto& device = device_contexts_[0];
 
-  // TODO(tonyyang-svail):
-  // - runs on a new local scope
-  // Scope& local_scope = scope->NewScope();
+  // Instantiate all the vars in the global scope
   for (auto& var : block.vars()) {
     scope->NewVar(var.name());
   }
 
+  Scope& local_scope = scope->NewScope();
+
   std::vector<bool> should_run = Preprocess(pdesc);
   PADDLE_ENFORCE(should_run.size() == block.ops_size());
   for (size_t i = 0; i < should_run.size(); ++i) {
     if (should_run[i]) {
+      for (auto var : block.ops(i).outputs()) {
+        for (auto argu : var.arguments()) {
+          if (local_scope.FindVar(argu) == nullptr) {
+            local_scope.NewVar(argu);
+          }
+        }
+      }
       auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i));
-      op->Run(*scope, *device);
+      op->Run(local_scope, *device);
     }
   }
-
-  // // print tensor value
-  // for (auto& var : block.vars()) {
-  //   std::cout << var.name() << std::endl;
-  //   auto v = scope->FindVar(var.name());
-  //   const LoDTensor& t = v->Get<LoDTensor>();
-  //   for (int i = 0; i < t.numel(); ++i) {
-  //     std::cout << t.data<float>()[i] << " ";
-  //   }
-  //   std::cout << std::endl;
-  // }
 }
 
 std::vector<bool> Executor::Preprocess(const ProgramDesc& pdesc) {
```
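The substance of the Run() change: block-level variables stay in the global scope, while each op's output variables are created on demand in a child scope, and ops now execute against that local_scope. A toy sketch of parent-chained scope lookup under those rules (this Scope is illustrative, not paddle::framework::Scope):

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Variable {};

// Toy scope with parent chaining: FindVar searches up the chain,
// NewVar creates in the current scope only.
class Scope {
 public:
  explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}

  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    if (!slot) slot = std::make_unique<Variable>();
    return slot.get();
  }

  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

 private:
  const Scope* parent_;
  std::map<std::string, std::unique_ptr<Variable>> vars_;
};

int main() {
  Scope global;
  global.NewVar("w1");   // block-level variable, like scope->NewVar(var.name())
  Scope local(&global);  // like scope->NewScope()
  if (local.FindVar("tmp") == nullptr) local.NewVar("tmp");  // op output
  std::cout << (local.FindVar("w1") != nullptr) << "\n";   // 1: visible via parent
  std::cout << (global.FindVar("tmp") != nullptr) << "\n";  // 0: local only
}
```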
```diff
@@ -125,7 +119,6 @@ std::vector<bool> Executor::Preprocess(const ProgramDesc& pdesc) {
       }
     }
 
-    // TODO(tonyyang-svail): add VLOG here for debugging
     if (op_desc.type() == "fetch" || found_dependent_vars) {
       // erase its output to the dependency graph
       for (auto& var : op_desc.outputs()) {
```
```diff
@@ -141,13 +134,9 @@ std::vector<bool> Executor::Preprocess(const ProgramDesc& pdesc) {
        }
      }
 
+      // this op should be executed
       should_run.push_back(true);
-      LOG(INFO) << "Yes " << op_desc.type();
     } else {
+      // this op should NOT be executed
       should_run.push_back(false);
-      LOG(INFO) << "No " << op_desc.type();
     }
   }
```
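Preprocess decides which ops must run: an op is kept if it is a fetch op or produces a variable some kept op depends on; a kept op's outputs are erased from the dependency set and its inputs inserted. The hunks above don't show the traversal direction, so the sketch below assumes the usual reverse walk from the fetch ops (toy Op type, not Paddle's OpDesc):

```cpp
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy op: type, input names, output names.
struct Op {
  std::string type;
  std::vector<std::string> inputs;
  std::vector<std::string> outputs;
};

// Walk ops back-to-front, keeping an op when it is a fetch op or
// produces a variable something downstream depends on.
std::vector<bool> Prune(const std::vector<Op>& ops) {
  std::set<std::string> dependent_vars;
  std::vector<bool> should_run;
  for (auto it = ops.rbegin(); it != ops.rend(); ++it) {
    bool found_dependent_vars = false;
    for (const auto& out : it->outputs) {
      if (dependent_vars.count(out)) found_dependent_vars = true;
    }
    if (it->type == "fetch" || found_dependent_vars) {
      // erase its outputs from, and insert its inputs into, the graph
      for (const auto& out : it->outputs) dependent_vars.erase(out);
      for (const auto& in : it->inputs) dependent_vars.insert(in);
      should_run.push_back(true);   // this op should be executed
    } else {
      should_run.push_back(false);  // this op should NOT be executed
    }
  }
  std::reverse(should_run.begin(), should_run.end());  // undo reverse walk
  return should_run;
}

int main() {
  std::vector<Op> ops = {
      {"gaussian_random", {}, {"a"}},
      {"gaussian_random", {}, {"unused"}},  // pruned: nothing fetches it
      {"fetch", {"a"}, {}},
  };
  for (bool run : Prune(ops)) std::cout << run << " ";  // prints: 1 0 1
}
```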
paddle/framework/executor_test.cc
```diff
@@ -18,7 +18,7 @@ limitations under the License. */
 #include "paddle/framework/attribute.h"
 #include "paddle/framework/backward.h"
 #include "paddle/framework/block_desc.h"
-#include "paddle/framework/grad_op_builder.h"
+// #include "paddle/framework/grad_op_builder.h"
 #include "paddle/framework/op_desc.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
```
```diff
@@ -37,68 +37,27 @@ using namespace paddle::framework;
 typedef paddle::framework::BlockDesc proto_block;
 typedef paddle::framework::OpDesc proto_op;
 
-struct SetAttrDescVisitor : public boost::static_visitor<void> {
-  explicit SetAttrDescVisitor(OpDesc::Attr* attr) : attr_(attr) {}
-  mutable OpDesc::Attr* attr_;
-  void operator()(int v) const { attr_->set_i(v); }
-  void operator()(float v) const { attr_->set_f(v); }
-  void operator()(const std::string& v) const { attr_->set_s(v); }
-  void operator()(bool b) const { attr_->set_b(b); }
-  void operator()(const std::vector<int>& v) const {
-    VectorToRepeated(v, attr_->mutable_ints());
-  }
-  void operator()(const std::vector<float>& v) const {
-    VectorToRepeated(v, attr_->mutable_floats());
-  }
-  void operator()(const std::vector<std::string>& v) const {
-    VectorToRepeated(v, attr_->mutable_strings());
-  }
-  void operator()(const std::vector<bool>& v) const {
-    VectorToRepeated(v, attr_->mutable_bools());
-  }
-  void operator()(BlockDesc* desc) const { attr_->set_block_idx(desc->idx()); }
-  void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); }
-};
-
 void AddOp(const std::string& type, const VariableNameMap& inputs,
            const VariableNameMap& outputs, AttributeMap attrs,
-           proto_block* block) {
+           paddle::framework::BlockDescBind* block) {
   // insert output
   for (auto kv : outputs) {
     for (auto v : kv.second) {
-      auto var = block->add_vars();
-      var->set_name(v);
-      auto var_lt = var->mutable_lod_tensor();
-      var_lt->set_data_type(paddle::framework::DataType::FP32);
+      auto var = block->NewVar(v);
+      var->SetDataType(paddle::framework::DataType::FP32);
     }
   }
 
   // insert op
-  auto op = block->add_ops();
-  op->set_type(type);
+  auto op = block->AppendOp();
+  op->SetType(type);
   for (auto kv : inputs) {
-    auto X = op->add_inputs();
-    X->set_parameter(kv.first);
-    for (auto argu : kv.second) {
-      X->add_arguments(argu);
-    }
+    op->SetInput(kv.first, kv.second);
   }
   for (auto kv : outputs) {
-    auto X = op->add_outputs();
-    X->set_parameter(kv.first);
-    for (auto argu : kv.second) {
-      X->add_arguments(argu);
-    }
+    op->SetOutput(kv.first, kv.second);
   }
-  for (auto& attr : attrs) {
-    auto* attr_desc = op->add_attrs();
-    attr_desc->set_name(attr.first);
-    attr_desc->set_type(
-        static_cast<paddle::framework::AttrType>(attr.second.which() - 1));
-    SetAttrDescVisitor visitor(attr_desc);
-    boost::apply_visitor(visitor, attr.second);
-  }
+  op->SetAttrMap(attrs);
 }
 
 std::once_flag set_variable_flag;
```
```diff
@@ -146,10 +105,16 @@ class ExecutorTesterRandom : public ::testing::Test {
   virtual void SetUp() override {
     int input_dim = 5, batch_size = 2, embed_dim = 5;
 
-    // init pdesc
-    auto init_root_block = init_pdesc_.add_blocks();
-    init_root_block->set_idx(0);
-    init_root_block->set_parent_idx(-1);
+    // init pdesc -----------------------------------------
+    auto temp_init_root_block = init_pdesc_.add_blocks();
+    temp_init_root_block->set_idx(0);
+    temp_init_root_block->set_parent_idx(-1);
+
+    // wrap to BlockDescBind
+    paddle::framework::ProgramDescBind& init_program =
+        paddle::framework::ProgramDescBind::Instance(&init_pdesc_);
+    paddle::framework::BlockDescBind* init_root_block = init_program.Block(0);
 
     AddOp("gaussian_random", {}, {{"Out", {"w1"}}},
           {{"dims", std::vector<int>{input_dim, embed_dim}}}, init_root_block);
     AddOp("gaussian_random", {}, {{"Out", {"w2"}}},
```
```diff
@@ -160,11 +125,18 @@ class ExecutorTesterRandom : public ::testing::Test {
     AddOp("fetch", {{"Input", {"w2"}}}, {},
           {{"dims", std::vector<int>{embed_dim, input_dim}}, {"col", 1}},
           init_root_block);
+    // flush
+    init_program.Proto();
 
-    // run pdesc
-    auto root_block = pdesc_.add_blocks();
-    root_block->set_idx(0);
-    root_block->set_parent_idx(-1);
+    // run pdesc -----------------------------------------
+    auto temp_root_block = pdesc_.add_blocks();
+    temp_root_block->set_idx(0);
+    temp_root_block->set_parent_idx(-1);
+
+    // wrap to BlockDescBind
+    paddle::framework::ProgramDescBind& program =
+        paddle::framework::ProgramDescBind::Instance(&pdesc_);
+    paddle::framework::BlockDescBind* root_block = program.Block(0);
 
     AddOp("gaussian_random", {}, {{"Out", {"a"}}},
           {{"dims", std::vector<int>{batch_size, input_dim}}}, root_block);
```
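Both test fixtures now follow the same wrap-mutate-flush sequence: add a raw block to the ProgramDesc proto, wrap the program via ProgramDescBind::Instance, build ops through Block(0), and call Proto() to flush the cached state back into the proto before handing it to the executor. A self-contained sketch of that buffering idea with toy types (ProgramBind here is illustrative, not Paddle's class):

```cpp
#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Toy "proto" program: just a list of op type names per block.
struct ProgramProto {
  std::vector<std::vector<std::string>> blocks;
};

// Toy bind wrapper: mutations are buffered and flushed by Proto(),
// mirroring ProgramDescBind::Instance(...) / Block(0) / Proto() above.
class ProgramBind {
 public:
  explicit ProgramBind(ProgramProto* prog) : prog_(prog) {}
  void AppendOp(int block_idx, const std::string& type) {
    pending_.push_back({block_idx, type});
  }
  ProgramProto* Proto() {  // flush buffered edits into the proto
    for (auto& p : pending_) prog_->blocks[p.first].push_back(p.second);
    pending_.clear();
    return prog_;
  }

 private:
  ProgramProto* prog_;
  std::vector<std::pair<int, std::string>> pending_;
};

int main() {
  ProgramProto pdesc;
  pdesc.blocks.emplace_back();      // like pdesc_.add_blocks()
  ProgramBind program(&pdesc);      // like ProgramDescBind::Instance(&pdesc_)
  program.AppendOp(0, "gaussian_random");
  assert(pdesc.blocks[0].empty());  // edits not visible until flushed
  program.Proto();                  // flush, as the tests do before Run
  assert(pdesc.blocks[0].size() == 1);
}
```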
```diff
@@ -175,13 +147,16 @@ class ExecutorTesterRandom : public ::testing::Test {
     AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}},
           {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {},
           root_block);
-    AppendBackward(pdesc_, {});
-
-    // AddOp("fetch", {{"Input", {"sub_result"}}}, {},
-    //       {{"dims", std::vector<int>{input_dim, batch_size}}, {"col", 0}},
-    //       root_block);
     AddOp("fetch", {{"Input", {"l2_distance"}}}, {},
           {{"dims", std::vector<int>{batch_size}}, {"col", 1}}, root_block);
+    // flush
+    program.Proto();
+
+    // TODO(tonyyang-svail):
+    //   - Test with Backward
+    // AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}},
+    //       {{"dims", std::vector<int>{batch_size, 1}}}, root_block);
+    // AppendBackward(program, {});
   }
 
  protected:
```
```diff
@@ -192,9 +167,14 @@ class ExecutorTesterRandom : public ::testing::Test {
 class ExecutorTesterFeedAndFetch : public ::testing::Test {
  public:
   virtual void SetUp() override {
-    auto root_block = pdesc_.add_blocks();
-    root_block->set_idx(0);
-    root_block->set_parent_idx(-1);
+    auto temp_root_block = pdesc_.add_blocks();
+    temp_root_block->set_idx(0);
+    temp_root_block->set_parent_idx(-1);
+
+    // wrap to BlockDescBind
+    paddle::framework::ProgramDescBind& program =
+        paddle::framework::ProgramDescBind::Instance(&pdesc_);
+    paddle::framework::BlockDescBind* root_block = program.Block(0);
 
     std::vector<int> dim{6};
```
```diff
@@ -207,6 +187,9 @@ class ExecutorTesterFeedAndFetch : public ::testing::Test {
     AddOp("fetch", {{"Input", {"b"}}}, {}, {{"dims", dim}, {"col", 1}},
           root_block);
+    // flush
+    program.Proto();
 
     std::vector<float> vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
     std::vector<float> vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
     inputs_.push_back(vec1);
```
@@ -235,12 +218,6 @@ TEST_F(ExecutorTesterRandom, CPU) {
...
@@ -235,12 +218,6 @@ TEST_F(ExecutorTesterRandom, CPU) {
executor
->
Run
(
pdesc_
,
GetGlobalScope
());
executor
->
Run
(
pdesc_
,
GetGlobalScope
());
std
::
vector
<
std
::
vector
<
float
>>
result
=
get_fetch_variable
<
float
>
();
std
::
vector
<
std
::
vector
<
float
>>
result
=
get_fetch_variable
<
float
>
();
for
(
auto
&
vec
:
result
)
{
for
(
auto
&
num
:
vec
)
{
std
::
cout
<<
num
<<
" "
;
}
std
::
cout
<<
std
::
endl
;
}
delete
executor
;
delete
executor
;
}
}
```diff
@@ -290,18 +267,10 @@ TEST_F(ExecutorTesterRandom, GPU) {
   Executor* executor = new Executor(places);
 
-  LOG(INFO) << "Run Init";
   executor->Run(init_pdesc_, GetGlobalScope());
-  LOG(INFO) << "Run";
   executor->Run(pdesc_, GetGlobalScope());
   std::vector<std::vector<float>> result = get_fetch_variable<float>();
-  for (auto& vec : result) {
-    for (auto& num : vec) {
-      std::cout << num << " ";
-    }
-    std::cout << std::endl;
-  }
 
   delete executor;
 }
```
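The cleanup drops the std::cout loops and LOG lines from the tests but leaves the fetch results unchecked. A hedged sketch (not part of this commit) of the kind of assertion that could replace the printing, with `fetched` standing in for the `get_fetch_variable<float>()` result:

```cpp
#include <vector>
#include "gtest/gtest.h"

// Hypothetical follow-up check: instead of printing fetch results,
// assert that a feed/fetch roundtrip is lossless.
TEST(ExecutorSketch, FeedFetchRoundTrip) {
  std::vector<float> fed = {1.0f, 2.0f, 3.0f};
  std::vector<float> fetched = fed;  // stand-in for get_fetch_variable<float>()
  ASSERT_EQ(fetched.size(), fed.size());
  for (size_t i = 0; i < fed.size(); ++i) {
    EXPECT_EQ(fetched[i], fed[i]);
  }
}
```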