Commit 63240326

Authored Dec 13, 2018 by Xin Pan
Parent: c89a1fb2

MLP forward backward

test=develop

Showing 10 changed files with 167 additions and 21 deletions (+167, -21)
paddle/fluid/imperative/layer.cc                         +4   -2
paddle/fluid/imperative/tracer.h                         +19  -6
paddle/fluid/operators/mul_op.cc                         +2   -1
paddle/fluid/pybind/imperative.cc                        +3   -2
python/paddle/fluid/backward.py                          +5   -2
python/paddle/fluid/framework.py                         +3   -0
python/paddle/fluid/imperative/base.py                   +2   -1
python/paddle/fluid/imperative/layers.py                 +8   -3
python/paddle/fluid/layers/nn.py                         +46  -0
python/paddle/fluid/tests/unittests/test_imperative.py   +75  -4
paddle/fluid/imperative/layer.cc

@@ -188,11 +188,13 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
   std::vector<Variable*> ret;
   for (size_t i = 0; i < input_vars_->size(); ++i) {
     bool found = false;
+    VarBase* origin_var = (*input_vars_)[i];
     for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) {
       Variable* var = scope->FindVar(outvar);
-      VarBase* origin_var = (*input_vars_)[i];
       std::string orig_var = grad_to_var_->at(outvar);
-      PADDLE_ENFORCE(origin_var->var_desc_->Name() == orig_var);
+      if (origin_var->var_desc_->Name() != orig_var) {
+        continue;
+      }
       VLOG(3) << "apply grad " << outvar << " with origin " << orig_var;
       origin_var->ApplyGrad(scope, var);
       found = true;
paddle/fluid/imperative/tracer.h

@@ -43,9 +43,12 @@ void CreateGradOp(const framework::OpDesc& op_desc,
 class Tracer {
  public:
-  explicit Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
+  explicit Tracer(framework::BlockDesc* root_block,
+                  framework::BlockDesc* startup_block)
+      : root_block_(root_block), startup_block_(startup_block) {
     root_scope_ = new framework::Scope();
     scopes_[root_block_] = root_scope_;
+    scopes_[startup_block_] = root_scope_;
   }

   virtual ~Tracer() { delete root_scope_; }

@@ -80,6 +83,8 @@ class Tracer {
       } else {
         op->pre_ops_->push_back(nullptr);
       }
+      VLOG(3) << "input vname " << vname << " "
+              << var->Get<framework::LoDTensor>().dims().size();
     }

     *op->output_vars_ = outputs;

@@ -98,12 +103,19 @@ class Tracer {
       outputs[i]->pre_op_ = op;
       outputs[i]->pre_op_out_idx_ = i;
     }
+
+    VLOG(3) << "tracer running " << op_desc->Type();
     op_base->Run(*scope, platform::CPUPlace());
-    framework::OpDesc* grad_op_desc;
-    auto grad_to_var = new std::unordered_map<std::string, std::string>();
-    CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
-    op->grad_op_desc_ = grad_op_desc;
-    op->grad_to_var_ = grad_to_var;
+    if (block == startup_block_) {
+      op->grad_op_desc_ = nullptr;
+      op->grad_to_var_ = nullptr;
+    } else {
+      framework::OpDesc* grad_op_desc;
+      auto grad_to_var = new std::unordered_map<std::string, std::string>();
+      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
+      op->grad_op_desc_ = grad_op_desc;
+      op->grad_to_var_ = grad_to_var;
+    }
     op->block_ = block;
   }

@@ -121,6 +133,7 @@ class Tracer {
  private:
   std::map<framework::BlockDesc*, framework::Scope*> scopes_;
   framework::BlockDesc* root_block_;
+  framework::BlockDesc* startup_block_;
   framework::Scope* root_scope_;
 };
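The practical effect of the startup_block_ plumbing: ops traced in the startup block (typically parameter initializers) still run eagerly in the shared root scope, but no grad op is recorded for them, so initialization never enters the backward graph. A minimal sketch of what this permits on the Python side, assuming a paddle.fluid build containing this commit; LayerHelper is the same internal utility the new FC layer below relies on, and this standalone use of it is illustrative only:

import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper

with fluid.imperative.guard():
    # The parameter's initializer op is prepended to the startup block;
    # the tracer executes it immediately but, per the change above,
    # attaches no grad_op_desc_ to it.
    helper = LayerHelper('demo', param_attr=None)
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[2, 3], dtype='float32', is_bias=False)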
paddle/fluid/operators/mul_op.cc

@@ -49,7 +49,8 @@ class MulOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GT(
         y_dims.size(), y_num_col_dims,
         "The input tensor Y's rank of MulOp should be larger than "
-        "y_num_col_dims.");
+        "y_num_col_dims: %ld vs %ld",
+        y_dims.size(), y_num_col_dims);

     auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
     auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
paddle/fluid/pybind/imperative.cc

@@ -24,8 +24,9 @@ namespace pybind {
 void BindTracer(pybind11::module* m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
       .def("__init__",
-           [](imperative::Tracer& self, framework::BlockDesc* root_block) {
-             new (&self) imperative::Tracer(root_block);
+           [](imperative::Tracer& self, framework::BlockDesc* root_block,
+              framework::BlockDesc* startup_block) {
+             new (&self) imperative::Tracer(root_block, startup_block);
           })
       .def("trace", &imperative::Tracer::Trace)
       .def("get_scope", &imperative::Tracer::GetScope,
python/paddle/fluid/backward.py

@@ -489,8 +489,11 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     grad_to_var = dict()
     op_desc = _create_op_desc_(
-        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]},
-        {"shape": [1],
+        "fill_constant",
+        {},
+        {"Out": [_append_grad_suffix_(loss.name)]},
+        {
+            "shape": [1],  # TODO(panyx0718): This can be loss.shape.
             "value": 1.0,
             "dtype": loss.dtype,
             "force_cpu": False,
python/paddle/fluid/framework.py

@@ -1316,6 +1316,9 @@ class Block(object):
     def _prepend_op(self, *args, **kwargs):
         op_desc = self.desc._prepend_op()
         op = Operator(self, op_desc, *args, **kwargs)
+        if _in_imperative_mode():
+            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
+                                       [v._ivar for v in op.outputs], self.desc)
         self.ops.insert(0, op)
         return op
python/paddle/fluid/imperative/base.py

@@ -28,7 +28,8 @@ def enabled():
 def guard():
     train = framework.Program()
     startup = framework.Program()
-    tracer = core.Tracer(train.current_block().desc)
+    tracer = core.Tracer(train.current_block().desc,
+                         startup.current_block().desc)

     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
             with framework._imperative_guard(tracer):
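From the caller's perspective guard() is used exactly as before; only the tracer construction changed. A minimal usage sketch, assuming this build (to_variable is defined in paddle.fluid.imperative.base):

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.imperative import base

with fluid.imperative.guard():
    # Ops issued here are traced eagerly against the train block, while
    # initializer ops are routed to the startup block.
    x = base.to_variable(np.ones([2, 2], dtype='float32'))
    y = fluid.layers.elementwise_mul(x, x)  # runs immediately
    y._backward()                           # eager backward pass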
python/paddle/fluid/imperative/layers.py

@@ -25,11 +25,9 @@ __all__ = ['PyLayer']
 class PyLayer(core.Layer):
     def __init__(self):
-        pass
+        self._built = False

     def __call__(self, inputs):
         # TODO(panyx0718): Support declarative mode as well.
         assert base.enabled()
         if not isinstance(inputs, list) and not isinstance(inputs, tuple):
             inputs = [inputs]

@@ -37,8 +35,15 @@ class PyLayer(core.Layer):
         for x in inputs:
             py_var = base.to_variable(x)
             var_inputs.append(py_var)
+        if not self._built:
+            self._build_once(inputs)
+            self._built = True
+
         outputs = self.forward(var_inputs)
         return outputs

+    def _build_once(self, inputs):
+        pass
+
     def forward(self, inputs):
         return []
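PyLayer now supports lazy construction: __call__ wraps raw inputs into variables, runs _build_once exactly once on the first call (with the raw inputs, so shapes are available), and only then dispatches to forward. A hypothetical subclass illustrating the protocol, assuming this build; the Scale layer itself is illustrative and not part of the commit:

import paddle.fluid as fluid

# Hypothetical example layer, not part of the commit.
class Scale(fluid.imperative.PyLayer):
    def __init__(self, factor):
        super(Scale, self).__init__()
        self._factor = factor

    def _build_once(self, inputs):
        # Called once, on the first __call__, with the raw inputs;
        # shape-dependent parameters would be created here.
        pass

    def forward(self, inputs):
        # inputs arrive as a list of variables; return a list as well.
        return [fluid.layers.scale(inputs[0], scale=self._factor)]

On the first call, Scale(2.0)(np_array) builds and then forwards; subsequent calls skip _build_once.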
python/paddle/fluid/layers/nn.py

@@ -29,6 +29,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
+from ..imperative import layers

 __all__ = [
     'fc',

@@ -9426,3 +9427,48 @@ def huber_loss(input, label, delta):
             'Residual': residual},
         attrs={'delta': delta})
     return out
+
+
+class FC(layers.PyLayer):
+    def __init__(self,
+                 size,
+                 param_attr=None,
+                 num_flatten_dims=1,
+                 dtype=core.VarDesc.VarType.FP32):
+        super(FC, self).__init__()
+        self._size = size
+        self._num_flatten_dims = num_flatten_dims
+        self._dtype = dtype
+        self._helper = LayerHelper('FC', param_attr=param_attr)
+
+    def _build_once(self, inputs):
+        input_shape = inputs[0].shape
+        param_shape = [
+            reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)
+        ] + [self._size]
+        self._w = self._helper.create_parameter(
+            attr=self._helper.param_attr,
+            shape=param_shape,
+            dtype=self._dtype,
+            is_bias=False)
+
+    def forward(self, inputs):
+        tmp = self._helper.create_variable_for_type_inference(self._dtype)
+        self._helper.append_op(
+            type="mul",
+            inputs={"X": inputs[0],
+                    "Y": self._w},
+            outputs={"Out": tmp},
+            attrs={"x_num_col_dims": self._num_flatten_dims,
+                   "y_num_col_dims": 1})
+        out = self._helper.create_variable_for_type_inference(self._dtype)
+        self._helper.append_op(
+            type="sum",
+            inputs={"X": [tmp]},
+            outputs={"Out": out},
+            attrs={"use_mkldnn": False})
+        return out
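A usage sketch for the new imperative FC, mirroring test_imperative.py below and assuming this build. Note that the weight is only created on the first call, once the input shape is known:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.layers.nn import FC

np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
with fluid.imperative.guard():
    fc = FC(3)           # output size 3; weight shape inferred lazily
    out = fc(np_inp)     # first call runs _build_once: _w gets shape [2, 3]
    print(out._numpy())  # forward result (mul, then sum over one partial)
    out._backward()      # eagerly build and run the backward ops
    print(fc._w._gradient())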
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -12,12 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import contextlib
 import unittest
 import sys
 import numpy as np

 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.layers.nn import FC
+
+
+@contextlib.contextmanager
+def new_program_scope():
+    prog = fluid.Program()
+    startup_prog = fluid.Program()
+    scope = fluid.core.Scope()
+    with fluid.scope_guard(scope):
+        with fluid.program_guard(prog, startup_prog):
+            yield


 class MyLayer(fluid.imperative.PyLayer):

@@ -30,6 +41,23 @@ class MyLayer(fluid.imperative.PyLayer):
         return [fluid.layers.elementwise_mul(x, x)]


+class MLP(fluid.imperative.PyLayer):
+    def __init__(self):
+        super(MLP, self).__init__()
+        self._fc1 = FC(3,
+                       fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
+        self._fc2 = FC(4,
+                       fluid.ParamAttr(
+                           initializer=fluid.initializer.Constant(value=0.1)))
+
+    def forward(self, inputs):
+        x = self._fc1(inputs[0])
+        x = self._fc2(x)
+        x = fluid.layers.reduce_sum(x)
+        return x
+
+
 class TestImperative(unittest.TestCase):
     def test_layer(self):
         with fluid.imperative.guard():

@@ -39,13 +67,56 @@ class TestImperative(unittest.TestCase):
             l.forward([])

     def test_layer_in_out(self):
+        np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
             l = MyLayer()
-            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
+            x = l(np_inp)[0]
             self.assertIsNotNone(x)
-            sys.stderr.write("%s output: %s\n" % (x, x._numpy()))
+            dy_out = x._numpy()
             x._backward()
-            sys.stderr.write("grad %s\n" % l._x_for_debug._gradient())
+            dy_grad = l._x_for_debug._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[3], append_batch_size=False)
+            l = MyLayer()
+            x = l(inp)[0]
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[l._x_for_debug.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+    def test_mlp(self):
+        np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
+        with fluid.imperative.guard():
+            mlp = MLP()
+            out = mlp(np_inp)
+            dy_out = out._numpy()
+            out._backward()
+            dy_grad = mlp._fc1._w._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[2, 2], append_batch_size=False)
+            mlp = MLP()
+            out = mlp(inp)
+            param_grads = fluid.backward.append_backward(
+                out, parameter_list=[mlp._fc1._w.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[out.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))


 if __name__ == '__main__':
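One detail worth noting in both tests: fluid.backward.append_backward returns a list of (parameter, gradient) Variable pairs, so the trailing [0] selects the first pair and param_grads[1].name is the gradient variable fetched from the executor. A distilled static-graph sketch of that pattern, with hypothetical names and assuming this build:

import paddle.fluid as fluid

prog, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(prog, startup):
    x = fluid.layers.data(name='x', shape=[2], append_batch_size=False)
    w = fluid.layers.create_parameter(shape=[2], dtype='float32')
    loss = fluid.layers.reduce_sum(fluid.layers.elementwise_mul(x, w))
    param, grad = fluid.backward.append_backward(
        loss, parameter_list=[w.name])[0]
    # param is w; grad is the Variable holding d(loss)/d(w),
    # fetchable from an Executor by grad.name.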