Repository: 机器未来 / Paddle (fork of PaddlePaddle / Paddle)

Commit 336160e6
Author: minqiyang
Date:   Dec 27, 2018
Parent: 28013a50

    Complete imperative optimizer implementation

    test=develop
Showing 6 changed files with 39 additions and 65 deletions (+39 -65):
paddle/fluid/imperative/tracer.h                               +0  -1
paddle/fluid/pybind/pybind.cc                                  +0  -1
python/paddle/fluid/framework.py                               +21 -9
python/paddle/fluid/layer_helper.py                            +2  -3
python/paddle/fluid/optimizer.py                               +2  -2
python/paddle/fluid/tests/unittests/test_imperative_mnist.py   +14 -49
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -84,7 +84,6 @@ class Tracer {
     *op->output_vars_ = outputs;
     for (size_t i = 0; i < outputs.size(); ++i) {
       const std::string vname = outputs[i]->var_desc_->Name();
-      LOG(ERROR) << "output name: " << vname;
       framework::Variable* var = root_scope_->Var(vname);
       if (!var->IsInitialized()) {
         framework::VarDesc* var_desc = block->FindVar(vname);
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -139,7 +139,6 @@ PYBIND11_MODULE(core, m) {
       .def_property("value",
                     [](const imperative::VarBase &self) { return self.var_; },
                     [](imperative::VarBase &self, framework::Variable *var) {
-                      LOG(ERROR) << "set var to pointer: " << var;
                       self.var_ = var;
                     },
                     py::return_value_policy::reference)
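
Both C++ hunks are pure cleanup: each deletes a stray LOG(ERROR) debug line left over from development, with no behavioral change. For readers unfamiliar with pybind11's `def_property`, the binding above pairs a getter and a setter lambda, so the removed log fired on every assignment to `value` from Python. A rough Python analogue of that binding (illustrative names only, not Paddle code):

```python
# Rough Python analogue of the def_property("value", getter, setter)
# binding touched above. Purely illustrative; not Paddle's implementation.
class VarBase(object):
    def __init__(self):
        self._var = None                 # stand-in for the C++ var_ member

    @property
    def value(self):                     # getter lambda: return self.var_;
        return self._var

    @value.setter
    def value(self, var):                # setter lambda: self.var_ = var;
        self._var = var                  # the deleted LOG(ERROR) sat here

v = VarBase()
v.value = object()
assert v.value is not None
```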
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1289,13 +1289,22 @@ class Block(object):
             Operator: the append Operator.
         """
         op_desc = self.desc.append_op()
-        op = Operator(block=self, desc=op_desc, *args, **kwargs)
+        op = Operator(
+            block=self,
+            desc=op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
+        self.ops.append(op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
+        return op
+
+    def _trace_op(self, op, stop_gradient=False):
         if _in_imperative_mode():
             _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                        [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
-        self.ops.append(op)
-        return op
+                                       stop_gradient)

     def _insert_op(self, index, *args, **kwargs):
         """
@@ -1342,12 +1351,15 @@ class Block(object):

     def _prepend_op(self, *args, **kwargs):
         op_desc = self.desc._prepend_op()
-        op = Operator(self, op_desc, *args, **kwargs)
-        if _in_imperative_mode():
-            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
-                                       [v._ivar for v in op.outputs], self.desc,
-                                       kwargs.get("stop_gradient", False))
+        op = Operator(
+            self,
+            op_desc,
+            type=kwargs.get("type", None),
+            inputs=kwargs.get("inputs", None),
+            outputs=kwargs.get("outputs", None),
+            attrs=kwargs.get("attrs", None))
         self.ops.insert(0, op)
+        self._trace_op(op, kwargs.get("stop_gradient", False))
         return op

     def _sync_with_cpp(self):
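
This is the heart of the refactor. `append_op` and `_prepend_op` stop forwarding `*args, **kwargs` wholesale into `Operator` and instead pick out the four constructor arguments explicitly, while tracing moves into the shared `_trace_op` helper. One payoff is that the tracing-only flag `stop_gradient` no longer has to pass through the `Operator` constructor's kwargs. A minimal standalone sketch of the pattern (hypothetical names, not the Paddle API):

```python
# Minimal sketch of the kwargs-splitting pattern adopted above: constructor
# kwargs are picked explicitly, and the control-only stop_gradient flag is
# consumed by the tracing step instead of leaking into the constructor.
# All names here are hypothetical stand-ins.
CTOR_KEYS = ("type", "inputs", "outputs", "attrs")

def trace(op, stop_gradient):                          # tracer stand-in
    print("tracing %s, stop_gradient=%s" % (op["type"], stop_gradient))

def append_op(ops, **kwargs):
    op = {k: kwargs.get(k, None) for k in CTOR_KEYS}   # Operator(...) stand-in
    ops.append(op)
    trace(op, kwargs.get("stop_gradient", False))      # _trace_op stand-in
    return op

ops = []
append_op(ops, type="mul", inputs={"X": [1]}, stop_gradient=True)
assert "stop_gradient" not in ops[0]   # flag reached trace(), not the ctor
```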
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -23,7 +23,6 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
 from paddle.fluid.imperative import base as imperative_base
-from paddle.fluid.imperative.base import to_variable
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
@@ -51,7 +50,7 @@ class LayerHelper(object):
         return default_startup_program()

     def to_variable(self, x):
-        return base.to_variable(x, self.main_program.current_block())
+        return imperative_base.to_variable(x, self.main_program.current_block())

     def append_op(self, *args, **kwargs):
         return self.main_program.current_block().append_op(*args, **kwargs)
@@ -371,7 +370,7 @@ class LayerHelper(object):
     def set_variable_initializer(self, var, initializer):
         assert isinstance(var, Variable)
         if imperative_base.enabled():
-            initializer(var, self.startup_program.global_block())
+            initializer(var, var.block)
         else:
             self.startup_program.global_block().create_var(
                 name=var.name,
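
In `set_variable_initializer`, the imperative branch now hands the initializer the variable's own block (`var.block`) rather than the startup program's global block, so initialization runs where the variable actually lives. Fluid initializers are callables that, given `(var, block)`, append their init op to that block; a toy model of the protocol (illustrative classes, not Paddle's):

```python
# Toy model of the initializer protocol the change relies on: calling
# initializer(var, block) appends an init op to that block. The classes
# are illustrative stand-ins, not Paddle's implementation.
class Block(object):
    def __init__(self):
        self.ops = []

class Variable(object):
    def __init__(self, name, block):
        self.name, self.block = name, block

class Constant(object):
    def __init__(self, value=0.0):
        self.value = value
    def __call__(self, var, block):
        block.ops.append(("fill_constant", var.name, self.value))

blk = Block()
w = Variable("w", blk)
Constant(1.0)(w, w.block)      # the new imperative path: init on w's block
assert blk.ops == [("fill_constant", "w", 1.0)]
```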
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -302,7 +302,7 @@ class Optimizer(object):
         This method combines interface `append_backward()` and
         `create_optimization_pass()` into one.
         """
-        if imperative_base.enabled:
+        if imperative_base.enabled():
             if parameter_list is not None:
                 params_grads = parameter_list
             else:
@@ -315,7 +315,7 @@ class Optimizer(object):
                     block=loss.block,
                     name=param._ivar._grad_name(),
                     stop_gradient=True)
-                grad_var._value = param._ivar.grad_value()
+                grad_var._value = param._ivar.grad_value
                 params_grads.append((param, grad_var))
             optimize_ops = self._create_optimization_pass(params_grads, loss,
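
The first hunk fixes a classic truthiness bug: `imperative_base.enabled` without parentheses is the function object itself, which is always truthy, so the imperative branch was taken even when imperative mode was off. A two-assert demonstration:

```python
# Demonstration of the bug fixed above: a function object is always truthy,
# so testing the function rather than its return value always succeeds.
def enabled():
    return False                     # pretend imperative mode is off

assert bool(enabled) is True         # old check: branch taken regardless
assert bool(enabled()) is False      # fixed check: respects the real mode
```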
--- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
@@ -43,15 +43,6 @@ class SimpleImgConvPool(fluid.imperative.PyLayer):
                  bias_attr=None):
         super(SimpleImgConvPool, self).__init__()

-        # groups = 1
-        # dilation = [1, 1]
-        # pad = [0, 0]
-        # stride = [1, 1]
-        # input_size = [2, 3, 5, 5]  # NCHW
-        # assert np.mod(input_size[1], groups) == 0
-        # f_c = input_size[1] // groups
-        # filter_size = [6, f_c, 3, 3]
-
         self._conv2d = Conv2D(
             num_channels=num_channels,
             num_filters=num_filters,
@@ -108,47 +99,21 @@ class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
         with fluid.imperative.guard():
             mnist = MNIST()
+            sgd = SGDOptimizer(learning_rate=1e-3)

-            x_data = np.random.rand(128, 1, 28, 28).astype('float32')
-            img = to_variable(x_data)
-            y_data = np.random.rand(128, 1).astype('int64')
-            label = to_variable(y_data)
-            label._stop_gradient = True
-
-            predict = mnist(img)
-            out = fluid.layers.cross_entropy(predict, label)
-            out._backward()
-            filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
-            )
-            # print(filter_grad)
-
-            sgd = SGDOptimizer(learning_rate=1e-3)
-            sgd.minimize(out)
-
-            # np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-            # with fluid.imperative.guard():
-            # mlp = MLP()
-            # out = mlp(np_inp)
-            # dy_out = out._numpy()
-            # out._backward()
-            # dy_grad = mlp._fc1._w._gradient()
-            # with new_program_scope():
-            # inp = fluid.layers.data(
-            #     name="inp", shape=[2, 2], append_batch_size=False)
-            # mlp = MLP()
-            # out = mlp(inp)
-            # param_grads = fluid.backward.append_backward(
-            #     out, parameter_list=[mlp._fc1._w.name])[0]
-            # exe = fluid.Executor(fluid.CPUPlace())
-            # exe.run(fluid.default_startup_program())
-            # static_out, static_grad = exe.run(
-            #     feed={inp.name: np_inp},
-            #     fetch_list=[out.name, param_grads[1].name])
-            # self.assertTrue(np.allclose(dy_out, static_out))
-            # self.assertTrue(np.allclose(dy_grad, static_grad))
+            for i in range(1):
+                x_data = np.random.rand(128, 1, 28, 28).astype('float32')
+                img = to_variable(x_data)
+                y_data = np.random.rand(128, 1).astype('int64')
+                label = to_variable(y_data)
+                label._stop_gradient = True
+
+                predict = mnist(img)
+                out = fluid.layers.cross_entropy(predict, label)
+                out._backward()
+                filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
+                )
+                sgd.minimize(out)


 if __name__ == '__main__':
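
The test change deletes the dead, commented-out MLP/static-graph comparison scaffolding and reshapes the MNIST case into the usual imperative training-loop form: build the model and optimizer once, then run forward, backward, and `minimize` per iteration. Reduced to a skeleton with stand-in callables (only the numpy data lines mirror the test):

```python
# Skeleton of the loop the test now follows. forward/backward/step are
# stand-ins for mnist(img) + cross_entropy, out._backward(), and
# sgd.minimize(out); the data lines mirror the test itself.
import numpy as np

forward = lambda x, y: float(np.abs(x).mean())   # loss stand-in
backward = lambda loss: loss                     # out._backward() stand-in
step = lambda grad: None                         # sgd.minimize(out) stand-in

for i in range(1):
    x_data = np.random.rand(128, 1, 28, 28).astype('float32')
    y_data = np.random.rand(128, 1).astype('int64')
    loss = forward(x_data, y_data)
    step(backward(loss))
```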