BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 5822f7f1
Authored on Dec 27, 2018 by minqiyang

Polish code

test=develop

Parent: fff44af8
Showing 10 changed files with 26 additions and 69 deletions (+26 -69)
paddle/fluid/framework/framework.proto      +1   -1
paddle/fluid/imperative/layer.cc            +0   -1
paddle/fluid/imperative/tracer.h            +2   -5
paddle/fluid/operators/optimizers/sgd_op.h  +0   -5
paddle/fluid/pybind/pybind.cc               +6   -11
python/paddle/fluid/framework.py            +2   -19
python/paddle/fluid/initializer.py          +0   -1
python/paddle/fluid/layer_helper.py         +11  -7
python/paddle/fluid/layers/tensor.py        +3   -10
python/paddle/fluid/optimizer.py            +1   -9
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 syntax = "proto2";
-/* option optimize_for = LITE_RUNTIME; */
+option optimize_for = LITE_RUNTIME;
 package paddle.framework.proto;
 
 // Any incompatible changes to ProgramDesc and its dependencies should
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -192,7 +192,6 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
         LOG(ERROR) << "tracer doesn't support yet";
       }
     }
-    VLOG(3) << "op grad output var " << outvar << " is inited";
   }
 
   grad_op_desc_->InferShape(*block_);
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -52,7 +52,7 @@ class Tracer {
              const std::vector<VarBase*>& outputs, framework::BlockDesc* block,
              const bool stop_gradient) {
     framework::OpDesc* op_desc = op->op_desc_;
-    LOG(ERROR) << "tracer tracing " << op_desc->Type();
+    VLOG(3) << "tracer tracing " << op_desc->Type();
     op_desc->InferShape(*block);
     op_desc->InferVarType(block);
     std::unique_ptr<framework::OperatorBase> op_base =
@@ -61,10 +61,7 @@ class Tracer {
     *op->input_vars_ = inputs;
     for (VarBase* input : inputs) {
       const std::string vname = input->var_desc_->Name();
-      LOG(ERROR) << "input: " << vname;
-      LOG(ERROR) << "input var: " << input->var_;
       framework::Variable* var = root_scope_->Var(vname);
-      LOG(ERROR) << "var_ in tracer pointer: " << var;
       input->var_ = var;
       if (!var->IsInitialized()) {
         framework::VarDesc* var_desc = block->FindVar(vname);
@@ -102,7 +99,7 @@ class Tracer {
       outputs[i]->pre_op_out_idx_ = i;
     }
 
-    LOG(ERROR) << "tracer running " << op_desc->Type();
+    VLOG(3) << "tracer running " << op_desc->Type();
     op_base->Run(*root_scope_, platform::CPUPlace());
     if (!stop_gradient) {
       framework::OpDesc* grad_op_desc;
--- a/paddle/fluid/operators/optimizers/sgd_op.h
+++ b/paddle/fluid/operators/optimizers/sgd_op.h
@@ -29,8 +29,6 @@ class SGDOpKernel : public framework::OpKernel<T> {
     const auto* param_var = ctx.InputVar("Param");
     const auto* grad_var = ctx.InputVar("Grad");
-    LOG(ERROR) << "grad_var: " << grad_var;
-
     if (param_var->IsType<framework::LoDTensor>()) {
       const auto* param = ctx.Input<framework::Tensor>("Param");
       auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
@@ -41,11 +39,8 @@ class SGDOpKernel : public framework::OpKernel<T> {
       const auto* grad = ctx.Input<framework::Tensor>("Grad");
 
       auto p = framework::EigenVector<T>::Flatten(*param);
-      LOG(ERROR) << "param flattened";
       auto g = framework::EigenVector<T>::Flatten(*grad);
-      LOG(ERROR) << "grad flattened";
       auto o = framework::EigenVector<T>::Flatten(*param_out);
-      LOG(ERROR) << "paramout flattened";
       auto* lr = learning_rate->data<T>();
 
       o = p - lr[0] * g;
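For reference, the dense branch above reduces to the plain SGD update param_out = param - lr * grad over flattened vectors (the `o = p - lr[0] * g` line). A minimal NumPy sketch of the same step, using hypothetical names rather than Paddle API:

    import numpy as np

    def sgd_step(param, grad, lr):
        # Dense SGD update, mirroring o = p - lr[0] * g in sgd_op.h.
        return param - lr * grad

    p = np.array([1.0, 2.0, 3.0])
    g = np.array([0.1, 0.2, 0.3])
    print(sgd_step(p, g, lr=0.1))  # [0.99 1.98 2.97]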
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -117,19 +117,14 @@ PYBIND11_MODULE(core, m) {
            [](imperative::VarBase &self, framework::Scope *scope) {
              self.RunBackward(scope);
            })
-      .def("_grad_var",
-           [](const imperative::VarBase &self) {
-             LOG(ERROR) << "grad_var_ pointer: " << self.grads_;
-             return self.grads_;
-           },
-           py::return_value_policy::reference)
       .def("_grad_name", &imperative::VarBase::GradName)
       .def("_grad", &imperative::VarBase::Grad)
-      .def("_print_var_pointer",
-           [](const imperative::VarBase &self) {
-             LOG(ERROR) << self.var_desc_->Name()
-                        << " print_var pointer: " << self.var_;
-           })
+      .def_property("grad_value",
+                    [](const imperative::VarBase &self) { return self.grads_; },
+                    [](imperative::VarBase &self, framework::Variable *grad) {
+                      self.grads_ = grad;
+                    },
+                    py::return_value_policy::reference)
       .def_property("value",
                     [](const imperative::VarBase &self) { return self.var_; },
                     [](imperative::VarBase &self, framework::Variable *var) {
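After this change the gradient variable is reached from Python through the `grad_value` property instead of the removed `_grad_var` and `_print_var_pointer` helpers. A rough sketch of the resulting binding surface, assuming a `core.VarBase` constructed the same way as in framework.py below; illustrative only, not a verified snippet from this commit:

    import paddle.fluid.core as core

    vb = core.VarBase()           # created as in Variable.__init__ under imperative mode
    forward_var = vb.value        # framework::Variable behind the VarBase (var_)
    gradient_var = vb.grad_value  # gradient Variable (grads_); the property also has a setter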
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -361,6 +361,7 @@ class Variable(object):
             self.block.vars[name] = self
         self.op = None
+        self.stop_gradient = stop_gradient
         self.is_data = is_data
         if _in_imperative_mode():
             self._ivar = core.VarBase()
@@ -368,7 +369,6 @@ class Variable(object):
             self._ivar.stop_gradient = stop_gradient
 
     def _numpy(self):
-        print("get_variable_tensor", self.desc.name())
        scope = _imperative_tracer().get_scope()
        tensor = core.get_variable_tensor(scope, self.desc.name())
        return np.array(tensor)
@@ -597,8 +597,7 @@ class Operator(object):
                 type=None,
                 inputs=None,
                 outputs=None,
-                 attrs=None,
-                 stop_gradient=False):
+                 attrs=None):
        self.block = block
        self.desc = desc
        # note: not add self.attrs here:
@@ -640,7 +639,6 @@ class Operator(object):
        if inputs is not None:
            for in_proto in proto.inputs:
-                print("create op: find_name", in_proto.name)
                found = find_name(inputs, in_proto.name)
                assert found or in_proto.dispensable, "Input {} not found".format(
                    in_proto.name)
@@ -1178,7 +1176,6 @@ class Block(object):
    def create_var(self, *args, **kwargs):
        var = Variable(block=self, *args, **kwargs)
        if 'initializer' in kwargs:
-            print("initializer, ", type(kwargs['initializer']))
            kwargs['initializer'](var, self)
        return var
@@ -1293,16 +1290,6 @@ class Block(object):
        """
        op_desc = self.desc.append_op()
        op = Operator(block=self, desc=op_desc, *args, **kwargs)
-        print("op inputs: ", [v._numpy() for v in op.inputs])
-        print("op inputs: ", [v for v in op.inputs])
-        import sys
-        sys.stdout.flush()
-        for v in op.inputs:
-            v._ivar._print_var_pointer()
-        print("print var pointer end")
-        import sys
-        sys.stdout.flush()
        if _in_imperative_mode():
            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                       [v._ivar for v in op.outputs], self.desc,
@@ -1360,10 +1347,6 @@ class Block(object):
            _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                       [v._ivar for v in op.outputs], self.desc,
                                       kwargs.get("stop_gradient", False))
-            print([v.name for v in op.outputs])
-            for v in op.outputs:
-                v._ivar._print_var_pointer()
-            print("fill_constant end")
        self.ops.insert(0, op)
        return op
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -153,7 +153,6 @@ class ConstantInitializer(Initializer):
        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        # Initialization Ops should be prepended and not appended
-        print("fill_constant")
        op = block._prepend_op(
            type="fill_constant",
            outputs={"Out": var},
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -22,6 +22,7 @@ import numpy as np
 from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
 from . import unique_name
+from paddle.fluid.imperative import base as imperative_base
 from paddle.fluid.imperative.base import to_variable
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr, WeightNormParamAttr
@@ -369,13 +370,16 @@ class LayerHelper(object):
    def set_variable_initializer(self, var, initializer):
        assert isinstance(var, Variable)
-        return self.startup_program.global_block().create_var(
-            name=var.name,
-            type=var.type,
-            dtype=var.dtype,
-            shape=var.shape,
-            persistable=True,
-            initializer=initializer)
+        if imperative_base.enabled():
+            initializer(var, self.startup_program.global_block())
+        else:
+            self.startup_program.global_block().create_var(
+                name=var.name,
+                type=var.type,
+                dtype=var.dtype,
+                shape=var.shape,
+                persistable=True,
+                initializer=initializer)
 
    def append_bias_op(self, input_var, dim_start=1, dim_end=None):
        """
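Callers no longer need to branch on imperative mode themselves: they hand the initializer to the helper, and the branch above decides whether to run it eagerly or record it on the startup program. A small caller sketch; the helper and variable are assumed to come from an existing layer, as in create_global_var below:

    from paddle.fluid.initializer import Constant

    def init_with_constant(helper, var, value):
        # helper: a LayerHelper; var: a Variable it created.
        # Imperative mode: the fill_constant op runs immediately via the tracer.
        # Graph mode: the initializer is recorded on the startup program.
        helper.set_variable_initializer(
            var, initializer=Constant(value=float(value)))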
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -132,16 +132,9 @@ def create_global_var(shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
-    print("set_variable_initializer, ", var.name)
-    if imperative_base.enabled():
-        var = helper.set_variable_initializer(
-            var, initializer=Constant(
-                value=float(value), force_cpu=force_cpu))
-        print("get var", var)
-    else:
-        helper.set_variable_initializer(
-            var, initializer=Constant(
-                value=float(value), force_cpu=force_cpu))
+    helper.set_variable_initializer(
+        var, initializer=Constant(
+            value=float(value), force_cpu=force_cpu))
    return var
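The public entry point touched here is fluid.layers.create_global_var, which now always delegates the imperative/graph decision to the helper. A short usage sketch (the variable name is our own):

    import paddle.fluid as fluid

    # Creates a persistable global variable initialized to zero via the
    # Constant initializer path shown above.
    counter = fluid.layers.create_global_var(
        shape=[1], value=0.0, dtype='float32',
        persistable=True, name='counter')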
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -109,7 +109,6 @@ class Optimizer(object):
        # create learning rate variable for every parameter
        param = param_and_grad[0]
        param_lr = param.optimize_attr['learning_rate']
-        print("param_lr: ", param_lr, self._global_learning_rate()._numpy())
        if type(param_lr) == Variable:
            return param_lr
        else:
@@ -311,15 +310,12 @@ class Optimizer(object):
            parameters = program.global_block().all_parameters()
            params_grads = []
            for param in parameters:
+                # create gradient variable
                grad_var = Variable(
                    block=loss.block,
                    name=param._ivar._grad_name(),
                    stop_gradient=True)
                grad_var._value = param._ivar._grad_var()
-                print("create grad var: ", grad_var.name)
-                print("grad_var value: ", grad_var._numpy())
-                import sys
-                sys.stdout.flush()
                params_grads.append((param, grad_var))
            optimize_ops = self._create_optimization_pass(params_grads, loss,
@@ -381,10 +377,6 @@ class SGDOptimizer(Optimizer):
    def _append_optimize_op(self, block, param_and_grad):
        assert isinstance(block, framework.Block)
-        print("append sgd")
-        import sys
-        sys.stdout.flush()
-
        # create the optimize op
        sgd_op = block.append_op(
            type=self.type,
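The gradient Variables above are built inside Optimizer.minimize when imperative mode is enabled. A minimal sketch of driving that path; it assumes a `loss` Variable produced by a preceding forward pass, and is illustrative rather than a snippet from this commit:

    import paddle.fluid as fluid

    # `loss` is assumed to be a Variable from an earlier forward computation.
    sgd = fluid.optimizer.SGDOptimizer(learning_rate=0.01)
    optimize_ops, params_grads = sgd.minimize(loss)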