s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection, in sync with the fork source)
Commit 08e2a5d6
Authored Jan 09, 2019 by minqiyang

Polish tracer code

test=develop

Parent: cded2476
Showing 3 changed files with 148 additions and 133 deletions (+148 / -133):
paddle/fluid/imperative/tracer.cc   +142  -1
paddle/fluid/imperative/tracer.h      +3  -130
paddle/fluid/pybind/CMakeLists.txt    +3  -2
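Taken together, the diff moves the bodies of CreateGradOp, InitVar, and Tracer::Trace out of the tracer.h header into tracer.cc, leaving only declarations in the header, and adds the resulting tracer library to the pybind dependency list.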
paddle/fluid/imperative/tracer.cc
@@ -15,5 +15,146 @@
 #include "paddle/fluid/imperative/tracer.h"

 namespace paddle {
-namespace imperative {}  // namespace imperative
+namespace imperative {
+
+void CreateGradOp(const framework::OpDesc& op_desc,
+                  const std::unordered_set<std::string>& no_grad_set,
+                  const std::vector<framework::BlockDesc*>& grad_sub_block,
+                  framework::OpDesc** grad_op_desc,
+                  std::unordered_map<std::string, std::string>* grad_to_var) {
+  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
+      framework::OpInfoMap::Instance()
+          .Get(op_desc.Type())
+          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
+  PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
+  // TODO(panyx0718): Leak?
+  *grad_op_desc = grad_op_descs[0].release();
+}
+
+void InitVar(framework::Variable* var, framework::Variable* grad_var) {
+  auto& var_t = var->Get<framework::LoDTensor>();
+  float* data =
+      grad_var->GetMutable<framework::LoDTensor>()->mutable_data<float>(
+          var_t.dims(), platform::CPUPlace());
+  std::fill(data, data + var_t.numel(), 0.0);
+}
+
+void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
+                   const VarBasePtrMap& outputs, framework::BlockDesc* block,
+                   const bool stop_gradient) {
+  std::map<std::string, VarBase*> vars;
+
+  framework::OpDesc* op_desc = op->op_desc_;
+  VLOG(3) << "tracer tracing " << op_desc->Type();
+  op_desc->InferShape(*block);
+  op_desc->InferVarType(block);
+  std::unique_ptr<framework::OperatorBase> op_base =
+      framework::OpRegistry::CreateOp(*op_desc);
+
+  framework::VariableValueMap invars_map;
+  framework::VariableValueMap outvars_map;
+
+  op->input_vars_ = inputs;
+  for (auto it : op->input_vars_) {
+    auto& invars = invars_map[it.first];
+    for (VarBase* inp : it.second) {
+      PADDLE_ENFORCE_NOT_NULL(inp->var_.get(), "op %s input %s nullptr",
+                              op->op_desc_->Type(), inp->var_desc_->Name());
+      invars.push_back(inp->var_.get());
+      vars[inp->var_desc_->Name()] = inp;
+      if (inp->pre_op_) {
+        op->pre_ops_[it.first].push_back(inp->pre_op_);
+        op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
+      } else {
+        op->pre_ops_[it.first].push_back(nullptr);
+      }
+      VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
+              << inp->var_->IsInitialized();
+    }
+  }
+
+  op->output_vars_ = outputs;
+  for (auto it : op->output_vars_) {
+    auto& outvars = outvars_map[it.first];
+    const std::vector<VarBase*>& outputs = it.second;
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      VarBase* out = outputs[i];
+      outvars.push_back(out->var_.get());
+      vars[out->var_desc_->Name()] = out;
+
+      framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name());
+      if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+        out->var_->GetMutable<framework::LoDTensor>();
+      } else {
+        LOG(ERROR) << "tracer doesn't support yet";
+      }
+      out->stop_gradient_ = stop_gradient;
+      out->pre_op_ = op;
+      out->pre_op_out_name_ = it.first;
+      out->pre_op_out_idx_ = i;
+
+      VLOG(3) << "output vname " << out->var_desc_->Name() << " "
+              << out->var_->IsInitialized();
+    }
+  }
+
+  VLOG(3) << "tracer running " << op_desc->Type();
+  framework::RuntimeContext ctx(invars_map, outvars_map);
+
+  // TODO(panyx0718): Cache p.
+  framework::OperatorWithKernel* op_kernel =
+      dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
+  PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");
+
+  framework::Scope scope;
+  platform::CPUPlace place;
+  PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
+  p.op.RuntimeInferShape(scope, place, ctx);
+  p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
+
+  if (!stop_gradient) {
+    framework::OpDesc* grad_op_desc;
+    auto grad_to_var = new std::unordered_map<std::string, std::string>();
+    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
+    op->grad_op_desc_ = grad_op_desc;
+
+    for (auto it : grad_op_desc->Inputs()) {
+      auto& grad_in_vars = op->grad_input_vars_[it.first];
+      for (const std::string& grad_invar : it.second) {
+        block->FindRecursiveOrCreateVar(grad_invar);
+        auto var_it = grad_to_var->find(grad_invar);
+        if (var_it == grad_to_var->end()) {
+          auto fwd_var_it = vars.find(grad_invar);
+          PADDLE_ENFORCE(fwd_var_it != vars.end());
+          grad_in_vars.push_back(fwd_var_it->second->var_.get());
+        } else {
+          VarBase* var = vars[var_it->second];
+          if (!var->grads_->var_->IsInitialized()) {
+            InitVar(var->var_.get(), var->grads_->var_.get());
+          }
+          grad_in_vars.push_back(var->grads_->var_.get());
+        }
+      }
+    }
+
+    for (auto it : grad_op_desc->Outputs()) {
+      auto& grad_out_vars = op->grad_output_vars_[it.first];
+      for (const std::string& grad_outvar : it.second) {
+        block->FindRecursiveOrCreateVar(grad_outvar);
+        auto var_it = grad_to_var->find(grad_outvar);
+        PADDLE_ENFORCE(var_it != grad_to_var->end());
+        VarBase* var = vars[var_it->second];
+        if (!var->grads_->var_->IsInitialized()) {
+          InitVar(var->var_.get(), var->grads_->var_.get());
+        }
+        grad_out_vars.push_back(var->grads_->var_.get());
+      }
+    }
+  }
+
+  op->block_ = block;
+}
+
+}  // namespace imperative
 }  // namespace paddle
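A note on the `// TODO(panyx0718): Leak?` line above: `grad_op_descs[0].release()` detaches the grad OpDesc from its owning unique_ptr and hands the raw pointer back through the `grad_op_desc` out-parameter, so whichever object receives it must eventually free it. A minimal standalone sketch of that ownership hand-off (hypothetical Widget type, not Paddle code):

#include <memory>
#include <vector>

struct Widget {
  int value = 0;
};

// Mirrors CreateGradOp's out-parameter style: the callee builds the object
// in a unique_ptr, then gives up ownership via release().
void MakeWidget(Widget** out) {
  std::vector<std::unique_ptr<Widget>> made;
  made.push_back(std::unique_ptr<Widget>(new Widget()));
  *out = made[0].release();  // the unique_ptr no longer owns the object
}

int main() {
  Widget* w = nullptr;
  MakeWidget(&w);
  // If no owner ever deletes w, it leaks -- the concern behind the TODO.
  delete w;
  return 0;
}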
paddle/fluid/imperative/tracer.h
@@ -30,23 +30,9 @@ void CreateGradOp(const framework::OpDesc& op_desc,
                   const std::unordered_set<std::string>& no_grad_set,
                   const std::vector<framework::BlockDesc*>& grad_sub_block,
                   framework::OpDesc** grad_op_desc,
-                  std::unordered_map<std::string, std::string>* grad_to_var) {
-  std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
-      framework::OpInfoMap::Instance()
-          .Get(op_desc.Type())
-          .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
-  PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
-  // TODO(panyx0718): Leak?
-  *grad_op_desc = grad_op_descs[0].release();
-}
+                  std::unordered_map<std::string, std::string>* grad_to_var);

-void InitVar(framework::Variable* var, framework::Variable* grad_var) {
-  auto& var_t = var->Get<framework::LoDTensor>();
-  float* data =
-      grad_var->GetMutable<framework::LoDTensor>()->mutable_data<float>(
-          var_t.dims(), platform::CPUPlace());
-  std::fill(data, data + var_t.numel(), 0.0);
-}
+void InitVar(framework::Variable* var, framework::Variable* grad_var);

 class Tracer {
  public:
@@ -57,120 +43,7 @@ class Tracer {
   void Trace(OpBase* op,
              const std::map<std::string, std::vector<VarBase*>>& inputs,
              const std::map<std::string, std::vector<VarBase*>>& outputs,
-             framework::BlockDesc* block, const bool stop_gradient = false) {
-    std::map<std::string, VarBase*> vars;
-
-    framework::OpDesc* op_desc = op->op_desc_;
-    VLOG(3) << "tracer tracing " << op_desc->Type();
-    op_desc->InferShape(*block);
-    op_desc->InferVarType(block);
-    std::unique_ptr<framework::OperatorBase> op_base =
-        framework::OpRegistry::CreateOp(*op_desc);
-
-    framework::VariableValueMap invars_map;
-    framework::VariableValueMap outvars_map;
-
-    op->input_vars_ = inputs;
-    for (auto it : op->input_vars_) {
-      auto& invars = invars_map[it.first];
-      for (VarBase* inp : it.second) {
-        PADDLE_ENFORCE_NOT_NULL(inp->var_.get(), "op %s input %s nullptr",
-                                op->op_desc_->Type(), inp->var_desc_->Name());
-        invars.push_back(inp->var_.get());
-        vars[inp->var_desc_->Name()] = inp;
-        if (inp->pre_op_) {
-          op->pre_ops_[it.first].push_back(inp->pre_op_);
-          op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
-        } else {
-          op->pre_ops_[it.first].push_back(nullptr);
-        }
-        VLOG(3) << "input vname " << inp->var_desc_->Name() << " "
-                << inp->var_->IsInitialized();
-      }
-    }
-
-    op->output_vars_ = outputs;
-    for (auto it : op->output_vars_) {
-      auto& outvars = outvars_map[it.first];
-      const std::vector<VarBase*>& outputs = it.second;
-      for (size_t i = 0; i < outputs.size(); ++i) {
-        VarBase* out = outputs[i];
-        outvars.push_back(out->var_.get());
-        vars[out->var_desc_->Name()] = out;
-
-        framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name());
-        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
-          out->var_->GetMutable<framework::LoDTensor>();
-        } else {
-          LOG(ERROR) << "tracer doesn't support yet";
-        }
-        out->stop_gradient_ = stop_gradient;
-        out->pre_op_ = op;
-        out->pre_op_out_name_ = it.first;
-        out->pre_op_out_idx_ = i;
-
-        VLOG(3) << "output vname " << out->var_desc_->Name() << " "
-                << out->var_->IsInitialized();
-      }
-    }
-
-    VLOG(3) << "tracer running " << op_desc->Type();
-    framework::RuntimeContext ctx(invars_map, outvars_map);
-
-    // TODO(panyx0718): Cache p.
-    framework::OperatorWithKernel* op_kernel =
-        dynamic_cast<framework::OperatorWithKernel*>(op_base.get());
-    PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");
-
-    framework::Scope scope;
-    platform::CPUPlace place;
-    PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
-    p.op.RuntimeInferShape(scope, place, ctx);
-    p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
-
-    if (!stop_gradient) {
-      framework::OpDesc* grad_op_desc;
-      auto grad_to_var = new std::unordered_map<std::string, std::string>();
-      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
-      op->grad_op_desc_ = grad_op_desc;
-
-      for (auto it : grad_op_desc->Inputs()) {
-        auto& grad_in_vars = op->grad_input_vars_[it.first];
-        for (const std::string& grad_invar : it.second) {
-          block->FindRecursiveOrCreateVar(grad_invar);
-          auto var_it = grad_to_var->find(grad_invar);
-          if (var_it == grad_to_var->end()) {
-            auto fwd_var_it = vars.find(grad_invar);
-            PADDLE_ENFORCE(fwd_var_it != vars.end());
-            grad_in_vars.push_back(fwd_var_it->second->var_.get());
-          } else {
-            VarBase* var = vars[var_it->second];
-            if (!var->grads_->var_->IsInitialized()) {
-              InitVar(var->var_.get(), var->grads_->var_.get());
-            }
-            grad_in_vars.push_back(var->grads_->var_.get());
-          }
-        }
-      }
-
-      for (auto it : grad_op_desc->Outputs()) {
-        auto& grad_out_vars = op->grad_output_vars_[it.first];
-        for (const std::string& grad_outvar : it.second) {
-          block->FindRecursiveOrCreateVar(grad_outvar);
-          auto var_it = grad_to_var->find(grad_outvar);
-          PADDLE_ENFORCE(var_it != grad_to_var->end());
-          VarBase* var = vars[var_it->second];
-          if (!var->grads_->var_->IsInitialized()) {
-            InitVar(var->var_.get(), var->grads_->var_.get());
-          }
-          grad_out_vars.push_back(var->grads_->var_.get());
-        }
-      }
-    }
-
-    op->block_ = block;
-  }
+             framework::BlockDesc* block, const bool stop_gradient = false);

  private:
   framework::BlockDesc* root_block_;
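Net effect on the header, condensed from the kept and added lines above (a reconstruction; the rest of the class body is abbreviated): tracer.h now carries only declarations, while the definitions live in tracer.cc.

void CreateGradOp(const framework::OpDesc& op_desc,
                  const std::unordered_set<std::string>& no_grad_set,
                  const std::vector<framework::BlockDesc*>& grad_sub_block,
                  framework::OpDesc** grad_op_desc,
                  std::unordered_map<std::string, std::string>* grad_to_var);

void InitVar(framework::Variable* var, framework::Variable* grad_var);

class Tracer {
 public:
  // ... constructor and other members elided ...
  void Trace(OpBase* op,
             const std::map<std::string, std::vector<VarBase*>>& inputs,
             const std::map<std::string, std::vector<VarBase*>>& outputs,
             framework::BlockDesc* block, const bool stop_gradient = false);

 private:
  framework::BlockDesc* root_block_;
};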
paddle/fluid/pybind/CMakeLists.txt
-set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler layer scope_pool)
+set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune
+    feed_fetch_method pass_builder parallel_executor profiler layer scope_pool
+    tracer)
 if(WITH_PYTHON)
   list(APPEND PYBIND_DEPS py_func_op)
 endif()
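With the Trace implementation now compiled into tracer.cc rather than living inline in the header, the Python bindings presumably need to link the tracer library explicitly, hence the new tracer entry in PYBIND_DEPS.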