Commit 2349acea

Authored Jan 08, 2019 by Xin Pan

checkpoint

test=develop

Parent: 11d4d39c

Showing 6 changed files with 78 additions and 24 deletions (+78 -24)
paddle/fluid/imperative/layer.cc          +19  -0
paddle/fluid/imperative/layer.h           +14  -13
paddle/fluid/imperative/tracer.h          +15  -0
paddle/fluid/pybind/imperative.cc         +3   -1
paddle/fluid/pybind/pybind.cc             +17  -7
python/paddle/fluid/imperative/layers.py  +10  -3
paddle/fluid/imperative/layer.cc

@@ -27,6 +27,8 @@
 namespace paddle {
 namespace imperative {

+std::map<int, py::object> py_funcs_;
+
 using framework::Variable;

 void AddTo(Variable* src, Variable* dst) {
@@ -183,5 +185,22 @@ void VarBase::RunBackward() {
   Autograd().RunBackward(this);
 }

+void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {
+  py_funcs_[func_id] = py_func;
+}
+
+std::vector<VarBase*> PyLayer::Apply(int func_id,
+                                     const std::vector<VarBase>& inputs) {
+  std::vector<framework::LoDTensor> tensor_inputs;
+  std::vector<VarBase*> ret;
+
+  for (const VarBase& in : inputs) {
+    tensor_inputs.push_back(in.var_->Get<framework::LoDTensor>());
+  }
+  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
+  CallPythonFunc(py_funcs_[func_id], tensor_inputs, &ret);
+  return ret;
+}
+
 }  // namespace imperative
 }  // namespace paddle
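The core of this change is a process-wide registry that maps an integer func_id to a Python callable: RegisterFunc stores the callable, and Apply looks it up, feeds it the input tensors, and collects the outputs. A minimal standalone Python sketch of that registry pattern follows (names are illustrative only, not Paddle's API):

# Minimal sketch of the func_id -> callable registry pattern used above.
# All names are illustrative; this is not Paddle code.
_py_funcs = {}

def register_func(func_id, py_func):
    # Counterpart of PyLayer::RegisterFunc: remember the callable under an id.
    _py_funcs[func_id] = py_func

def apply(func_id, inputs):
    # Counterpart of PyLayer::Apply: the id must have been registered first
    # (the C++ side enforces this with PADDLE_ENFORCE).
    assert func_id in _py_funcs
    return list(_py_funcs[func_id](inputs))

register_func(1, lambda xs: tuple(x + 10 for x in xs))
print(apply(1, [1, 2, 3]))  # [11, 12, 13]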
paddle/fluid/imperative/layer.h

@@ -82,6 +82,7 @@ class PreparedOp {
   framework::OperatorWithKernel::OpKernelFunc func;
   platform::DeviceContext* dev_ctx;
 };

+class OpBase;

 class VarBase {
@@ -128,7 +129,11 @@ class VarBase {
 class OpBase {
  public:
-  OpBase() : op_desc_(nullptr), grad_op_desc_(nullptr) {}
+  OpBase()
+      : op_desc_(nullptr),
+        grad_op_desc_(nullptr),
+        forward_id_(-1),
+        backward_id_(-1) {}

   virtual ~OpBase() {
     if (grad_op_desc_) delete grad_op_desc_;
@@ -139,6 +144,9 @@ class OpBase {
   framework::OpDesc* op_desc_;
   framework::OpDesc* grad_op_desc_;

+  int forward_id_;
+  int backward_id_;
+
   std::map<std::string, std::vector<VarBase*>> input_vars_;
   std::map<std::string, std::vector<VarBase*>> output_vars_;
   std::map<std::string, std::vector<OpBase*>> pre_ops_;
@@ -159,7 +167,7 @@ class Layer {
   }
 };

-static void CallPythonFunc(py::object* callable,
+static void CallPythonFunc(const py::object& callable,
                            const std::vector<framework::LoDTensor>& ins,
                            std::vector<VarBase*>* outs) {
   py::gil_scoped_acquire guard;
@@ -169,7 +177,7 @@ static void CallPythonFunc(py::object* callable,
   }

   // TODO(panyx0718): Who owns the returned LoDTensor.
-  auto ret = (*callable)(in_args);
+  auto ret = callable(in_args);
   auto ret_tuple = py::cast<py::tuple>(ret);
   size_t ret_num = py::len(ret_tuple);
   for (size_t i = 0; i < ret_num; ++i) {
@@ -192,17 +200,10 @@ class PyLayer {
  public:
   virtual ~PyLayer() {}

-  static std::vector<VarBase*> Apply(py::object* callable,
-                                     const std::vector<VarBase>& inputs) {
-    std::vector<framework::LoDTensor> tensor_inputs;
-    std::vector<VarBase*> ret;
-
-    for (const VarBase& in : inputs) {
-      tensor_inputs.push_back(in.var_->Get<framework::LoDTensor>());
-    }
-    CallPythonFunc(callable, tensor_inputs, &ret);
-    return ret;
-  }
+  static void RegisterFunc(int func_id, const py::object& py_func);
+
+  static std::vector<VarBase*> Apply(int func_id,
+                                     const std::vector<VarBase>& inputs);
 };

 }  // namespace imperative
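CallPythonFunc now receives the callable by const reference and invokes it directly instead of dereferencing a pointer; its contract is that the Python callable returns a tuple, one entry per output. A rough standalone Python analogue of that control flow (illustrative only, not Paddle code):

# Rough Python analogue of CallPythonFunc's control flow: call the Python
# callable on the inputs, treat the result as a tuple, and collect one
# output per returned element.
def call_python_func(callable_, ins):
    ret = callable_(ins)                 # auto ret = callable(in_args);
    ret_tuple = tuple(ret)               # py::cast<py::tuple>(ret)
    outs = []
    for item in ret_tuple:               # for (size_t i = 0; i < ret_num; ++i)
        outs.append(item)                # ownership of the returned tensors is
    return outs                          # still an open TODO in the C++ code

print(call_python_func(lambda xs: tuple(x * 10 for x in xs), [1, 2, 3]))
# prints [10, 20, 30]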
paddle/fluid/imperative/tracer.h

@@ -172,6 +172,21 @@ class Tracer {
     op->block_ = block;
   }

+  std::vector<VarBase*> PyTrace(OpBase* op, const std::vector<VarBase>& inputs) {
+    std::vector<VarBase*> outputs = PyLayer::Apply(op->forward_id_, inputs);
+    /*
+    for (const VarBase& inp : inputs) {
+      if (inp.pre_op_) {
+        op->pre_ops_[it.first].push_back(inp->pre_op_);
+        op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
+      } else {
+        op->pre_ops_[it.first].push_back(nullptr);
+      }
+    }*/
+    return outputs;
+  }
+
  private:
   framework::BlockDesc* root_block_;
 };
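Tracer::PyTrace dispatches through the registry keyed by the op's forward_id_ and returns the outputs; the bookkeeping that would link each input to its producing op is still commented out at this checkpoint. A standalone sketch of the dispatch step, using stand-in classes rather than Paddle's API:

# Stand-in sketch of the PyTrace dispatch step; not Paddle's API.
_registry = {1: lambda inputs: tuple(x + 1 for x in inputs)}

class Op:
    def __init__(self, forward_id):
        self.forward_id = forward_id

def py_trace(op, inputs):
    # PyLayer::Apply(op->forward_id_, inputs): look up and run the forward.
    outputs = list(_registry[op.forward_id](inputs))
    # Linking each input's producing op into op.pre_ops_ (needed for backward)
    # is still commented out in the C++ code above.
    return outputs

print(py_trace(Op(forward_id=1), [1, 2, 3]))  # [2, 3, 4]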
paddle/fluid/pybind/imperative.cc

@@ -26,7 +26,9 @@ void BindTracer(pybind11::module *m) {
           [](imperative::Tracer &self, framework::BlockDesc *root_block) {
             new (&self) imperative::Tracer(root_block);
           })
-      .def("trace", &imperative::Tracer::Trace);
+      .def("trace", &imperative::Tracer::Trace)
+      .def("py_trace", &imperative::Tracer::PyTrace,
+           pybind11::return_value_policy::take_ownership);
 }

 }  // namespace pybind
paddle/fluid/pybind/pybind.cc

@@ -168,6 +168,13 @@ PYBIND11_MODULE(core, m) {
               self.op_desc_ = op_desc;
             }
           },
           py::return_value_policy::reference)
+      .def_property(
+          "forward_id",
+          [](const imperative::OpBase &self) { return self.forward_id_; },
+          [](imperative::OpBase &self, int forward_id) {
+            self.forward_id_ = forward_id;
+          },
+          py::return_value_policy::reference);

   py::class_<imperative::Layer, Layer /* <--- trampoline*/> layer(m, "Layer");
@@ -179,13 +186,16 @@ PYBIND11_MODULE(core, m) {
   py::class_<paddle::imperative::PyLayer>(m, "PyLayer")
       .def(py::init<>())
-      .def_static(
-          "apply",
-          [](py::object *callable,
-             const std::vector<imperative::VarBase> &inputs)
-              -> std::vector<imperative::VarBase *> {
-            return imperative::PyLayer::Apply(callable, inputs);
-          },
-          py::return_value_policy::take_ownership);
+      .def_static(
+          "apply",
+          [](int func_id, const std::vector<imperative::VarBase> &inputs)
+              -> std::vector<imperative::VarBase *> {
+            return imperative::PyLayer::Apply(func_id, inputs);
+          },
+          py::return_value_policy::take_ownership)
+      .def_static("register_func",
+                  [](int func_id, const py::object &callable) {
+                    imperative::PyLayer::RegisterFunc(func_id, callable);
+                  });

   BindTracer(&m);
python/paddle/fluid/imperative/layers.py

@@ -48,7 +48,6 @@ class Layer(core.Layer):
         raise ValueError("Layer shouldn't implement backward")


-# TODO(panyx0718): Inherit from C++ base class.
 class PyLayer(core.PyLayer):
     """Layers composed of user-defined python codes."""
@@ -65,13 +64,21 @@ class PyLayer(core.PyLayer):
     @classmethod
     def __call__(cls, inputs):
+        tracer = framework._imperative_tracer()
+        block = framework.default_main_program().current_block()
         inputs = map(base.to_variable, inputs)
         inputs = [x._ivar for x in inputs]

-        ivars = core.PyLayer.apply(cls.forward, inputs)
+        PyLayer.register_func(1, cls.forward)
+
+        iop = core.OpBase()
+        iop.forward_id = 1
+        block.ops.append(iop)
+        ivars = tracer.py_trace(iop, inputs)
+        # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
             tensor = ivar.value.get_tensor()
             block = framework.default_main_program().current_block()
             py_var = framework.Variable(
                 block,
                 type=core.VarDesc.VarType.LOD_TENSOR,
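Taken together, __call__ now registers the subclass's forward under an id (hard-coded to 1 at this checkpoint), records an OpBase carrying that id in the block, and lets the tracer run the forward through the registry. A standalone sketch of that flow with stand-in classes (none of these are Paddle's real types):

# Stand-in sketch of the new __call__ flow; FakeCore/FakeOp/FakeTracer are
# illustrative replacements for core.PyLayer, core.OpBase and the tracer.
class FakeCore:
    registry = {}

    @classmethod
    def register_func(cls, func_id, fn):   # PyLayer.register_func(1, cls.forward)
        cls.registry[func_id] = fn

class FakeOp:
    forward_id = None                      # iop.forward_id = 1

class FakeTracer:
    def __init__(self):
        self.ops = []

    def py_trace(self, op, inputs):        # ivars = tracer.py_trace(iop, inputs)
        self.ops.append(op)                # stands in for block.ops.append(iop)
        return FakeCore.registry[op.forward_id](inputs)

class DoubleLayer:
    @staticmethod
    def forward(inputs):
        return [x * 2 for x in inputs]

tracer = FakeTracer()
FakeCore.register_func(1, DoubleLayer.forward)
iop = FakeOp()
iop.forward_id = 1
print(tracer.py_trace(iop, [1, 2, 3]))     # [2, 4, 6]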