PaddlePaddle/Paddle, commit dc8847af
Authored Dec 18, 2018 by sneaxiy
add examples and comments
test=develop
Parent: f0df62f1
Showing 2 changed files with 92 additions and 26 deletions (+92, -26):
paddle/fluid/operators/py_func_op.cc (+51, -26)
python/paddle/fluid/layers/nn.py (+41, -0)
paddle/fluid/operators/py_func_op.cc
@@ -43,9 +43,12 @@ static py::object *GetPythonCallableObject(size_t i) {
   return &g_py_callables[i];
 }
 
-static std::string PythonObjectToString(const py::object &py_callable) {
+static std::string PythonFuncDebugString(const py::object &py_callable) {
   py::gil_scoped_acquire guard;
-  return py::str(*py_callable);
+  std::string wrapper_func_str = py::str(py_callable);
+  auto inner_func = py_callable.attr("_func");
+  std::string inner_func_str = py::str(inner_func);
+  return inner_func_str + " wrapped by " + wrapper_func_str;
 }
 
 static void CallPythonFunc(py::object *callable,
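Note: the new PythonFuncDebugString assumes the registered callable is a Python-side wrapper object that stores the original user function in a `_func` attribute. A minimal Python sketch of that convention follows; the `_PyFuncWrapper` name is hypothetical, only the `_func` attribute is taken from the code above.

    # Hypothetical stand-in for the Python-side registry object;
    # the real wrapper lives in python/paddle/fluid/layers/nn.py.
    class _PyFuncWrapper(object):
        def __init__(self, func):
            self._func = func  # the raw user-defined function

        def __call__(self, *args):
            return self._func(*args)

    def user_func(x):
        return x

    wrapper = _PyFuncWrapper(user_func)
    # Mirrors the C++ concatenation:
    # inner_func_str + " wrapped by " + wrapper_func_str
    print("%s wrapped by %s" % (wrapper._func, wrapper))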
@@ -93,15 +96,29 @@ class PyFuncOpShapeInference : public framework::InferShapeBase {
   void operator()(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(!ctx->IsRuntime(),
                    "Infer shape cannot be called in runtime.");
+
+    /**
+     * X or Out can be empty, so that py_func can be more flexible
+     * to support Python functions with no input or no output
+     */
     PADDLE_ENFORCE(ctx->HasInputs("X") || ctx->HasOutputs("Out"),
                    "Input(X) or Output(Out) must exist");
     PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>(kForwardPythonCallableId), 0,
                       "Function id cannot be less than 0");
 
-    // Transverse all outputs
-    // If name of any output ends with @GRAD,
-    // set its shape, dtype, lod_level, type to be the same as
-    // the correponding forward variable
+    /**
+     * Traverse all outputs, check if name of any output ends with @GRAD.
+     * If found, set its shape, dtype, lod_level, type to be the same as
+     * the corresponding forward variable
+     *
+     * Why not get input dims from InferShapeContext?
+     * Because some variables in forward inputs/outputs may not be needed
+     * in backward. Those variables are not inside InferShapeContext.
+     *
+     * InferShape would be only called in compile time. During runtime,
+     * the shapes of outputs should be guaranteed by user-defined Python
+     * functions.
+     */
     auto *op = boost::get<const framework::OpDesc *>(ctx->GetOp());
     auto *block = op->Block();
     const std::string kGradVarSuffix = framework::kGradVarSuffix;
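Note: the traversal above relies on Paddle's grad-variable naming convention, where the gradient of a forward variable `v` is a variable named `v@GRAD` (framework::kGradVarSuffix). A short Python sketch of the suffix matching, with a hypothetical helper name:

    GRAD_SUFFIX = "@GRAD"  # mirrors framework::kGradVarSuffix

    def forward_var_name(out_name):
        # Return the forward variable name for a grad output, else None.
        if len(out_name) > len(GRAD_SUFFIX) and out_name.endswith(GRAD_SUFFIX):
            return out_name[:-len(GRAD_SUFFIX)]
        return None

    assert forward_var_name("hidden_0@GRAD") == "hidden_0"
    assert forward_var_name("hidden_0") is None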
@@ -113,7 +130,7 @@ class PyFuncOpShapeInference : public framework::InferShapeBase {
       }
       auto out_name = out_var_desc->Name();
       if (out_name == framework::kEmptyVarName ||
-          out_name.size() <= kGradVarSuffix.size()) {
+          out_name.size() < kGradVarSuffix.size()) {
         continue;
       }
@@ -152,7 +169,28 @@ class PyFuncOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+/**
+ * There are several benefits when backward op of py_func op is
+ * still py_func op.
+ *
+ *  - Less codes are needed, since codes of backward is almost
+ *    the same as forward.
+ *
+ *  - To support high order derivative, so that py_func is
+ *    infinite-order differentiable
+ */
 class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
+ private:
+  static std::string DebugString(const std::vector<std::string> &strs) {
+    if (strs.empty()) return "";
+    std::string ret = strs[0];
+    for (size_t i = 1; i < strs.size(); ++i) {
+      ret += " ";
+      ret += strs[i];
+    }
+    return ret;
+  }
+
  public:
   using framework::GradOpDescMakerBase::GradOpDescMakerBase;
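Note: the private DebugString added here is a plain space-join over variable names; in Python terms it behaves like the sketch below (function name illustrative).

    def debug_string(strs):
        # "" for an empty list, space-separated names otherwise,
        # matching the C++ helper above.
        return " ".join(strs)

    assert debug_string([]) == ""
    assert debug_string(["x", "x@GRAD"]) == "x x@GRAD"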
@@ -207,21 +245,8 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
     // But in Python side, if IG is not needed, users can just return None
     auto bwd_outs = InputGrad("X", false);
 
-    if (VLOG_IS_ON(10)) {
-      std::string in_str = "PyFunc Grad Input: ";
-      for (auto &in : bwd_ins) {
-        in_str += in;
-        in_str += " ";
-      }
-      VLOG(10) << in_str;
-
-      std::string out_str = "PyFunc Grad Output: ";
-      for (auto &out : bwd_outs) {
-        out_str += out;
-        out_str += " ";
-      }
-      VLOG(10) << out_str;
-    }
+    VLOG(10) << "PyFunc Grad Input: " << DebugString(bwd_ins);
+    VLOG(10) << "PyFunc Grad Output: " << DebugString(bwd_outs);
 
     grad_op->SetInput("X", bwd_ins);
     grad_op->SetOutput("Out", bwd_outs);
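Note: the retained comment documents a py_func convention: when an input gradient (IG) is not needed, the Python-side backward function can simply return None for that slot. A hedged sketch of such a backward function; the operation and names are illustrative, not part of this commit.

    import numpy as np

    # Backward of out = x * y that only propagates a gradient to x.
    def mul_grad(x, y, dout):
        dx = np.array(dout) * np.array(y)
        return dx, None  # None: no gradient is needed for y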
@@ -245,6 +270,7 @@ class PyFuncOp : public framework::OperatorBase {
     std::vector<framework::LoDTensor> inputs(in_arg_names.size());
     for (size_t i = 0; i < in_arg_names.size(); ++i) {
       auto in_var = scope.FindVar(in_arg_names[i]);
+      // When py_func op is called in backward, in_var may be null
       if (in_var == nullptr) {
         continue;
       }
@@ -263,15 +289,14 @@ class PyFuncOp : public framework::OperatorBase {
     std::vector<framework::LoDTensor *> outputs(out_arg_names.size());
     for (size_t i = 0; i < out_arg_names.size(); ++i) {
       auto *out_var = scope.FindVar(out_arg_names[i]);
-      auto *out_tensor =
+      outputs[i] =
           out_var ? out_var->GetMutable<framework::LoDTensor>() : nullptr;
-      outputs[i] = out_tensor;
     }
 
     auto callable_id = static_cast<size_t>(Attr<int>(kForwardPythonCallableId));
     auto *py_callable = GetPythonCallableObject(callable_id);
-    VLOG(10) << "Call py_func_op with id " << callable_id << ": "
-             << PythonObjectToString(*py_callable);
+    VLOG(10) << "Call Python function with id " << callable_id << ": "
+             << PythonFuncDebugString(*py_callable);
     CallPythonFunc(py_callable, inputs, &outputs);
   }
 };
python/paddle/fluid/layers/nn.py
@@ -9243,6 +9243,47 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
     Returns:
         out (Variable|list(Variable)|tuple(Variable)): input :code:`out`
 
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> import six
+        >>>
+        >>> def create_tmp_var(name, dtype, shape):
+        >>>     return fluid.default_main_program().current_block().create_var(
+        >>>         name=name, dtype=dtype, shape=shape)
+        >>>
+        >>> # tanh activation has been provided by Paddle C++ op
+        >>> # Here, we only use tanh to be an example to show the usage
+        >>> # of py_func
+        >>> def tanh(x):
+        >>>     return np.tanh(x)
+        >>>
+        >>> # forward input x is skipped
+        >>> def tanh_grad(y, dy):
+        >>>     return np.array(dy) * (1 - np.square(np.array(y)))
+        >>>
+        >>> def debug_func(x):
+        >>>     print(x)
+        >>>
+        >>> def simple_net(img, label):
+        >>>     hidden = img
+        >>>     for idx in six.moves.range(4):
+        >>>         hidden = fluid.layers.fc(hidden, size=200)
+        >>>         new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
+        >>>             dtype=hidden.dtype, shape=hidden.shape)
+        >>>
+        >>>         # user-defined layers with forward and backward
+        >>>         hidden = fluid.layers.py_func(func=tanh, x=hidden,
+        >>>             out=new_hidden, backward_func=tanh_grad,
+        >>>             skip_vars_in_backward_input=hidden)
+        >>>
+        >>>     # user-defined debug layers to print variables
+        >>>     fluid.layers.py_func(func=debug_func, x=hidden, out=None)
+        >>>
+        >>>     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
+        >>>     loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        >>>     return fluid.layers.mean(loss)
+
     """
     helper = LayerHelper('py_func', **locals())
     if x is None:
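Note: the docstring example uses `np` without importing it, so running it also requires `import numpy as np`. Independently of Paddle, the tanh_grad formula dy * (1 - y^2) can be checked against a numerical derivative; the snippet below is such a sanity check, not part of the commit.

    import numpy as np

    def tanh(x):
        return np.tanh(x)

    def tanh_grad(y, dy):
        # forward input x is skipped: the gradient only needs y = tanh(x)
        return np.array(dy) * (1 - np.square(np.array(y)))

    x = np.linspace(-2.0, 2.0, 5)
    y = tanh(x)
    analytic = tanh_grad(y, np.ones_like(x))

    eps = 1e-6
    numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=1e-6)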