机器未来 / Paddle (forked from PaddlePaddle/Paddle)
Commit c61e82bc

Authored Oct 04, 2017 by fengjiayi

    Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle
    into dev_backward_for_op_desc

Parents: 37b0bb15, eed2c1e1
Showing 4 changed files with 30 additions and 27 deletions (+30 −27):

    paddle/framework/backward.cc                      +5   −5
    paddle/operators/sgd_op.cc                        +18  −15
    paddle/operators/sgd_op.h                         +4   −4
    python/paddle/v2/framework/tests/test_sgd_op.py   +3   −3
paddle/framework/backward.cc
@@ -149,7 +149,7 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
     for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1;
          ++output_idx) {
       auto insert_add_x = dup_outputs[output_idx];
-      auto insert_add_y = dup_outputs[output_idx];
+      auto insert_add_y = dup_outputs[output_idx + 1];
       auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx);
       // first add op inserted
       if (output_idx == dup_outputs.size() - 2) {
@@ -160,9 +160,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
       }
       insert_position.push_back(
           {dup_op.back(),
-           OpRegistry::CreateOp(
-               "sum", {{"X", {insert_add_x}}, {"X", {insert_add_y}}},
-               {{"Out", {insert_add_out}}}, {})});
+           OpRegistry::CreateOp("sum", {{"X", {insert_add_x, insert_add_y}}},
+                                {{"Out", {insert_add_out}}}, {})});
     }
   }
@@ -202,7 +201,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
   // process recurrent gradient op as a special operator.
   if (forwardOp.Type() == "recurrent") {
-    // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or
+    // NOTE clean up cycle call somewhere (RNN's stepnet constains itself),
+    // or
     // this will result in infinite loop.
     const auto& rnnop =
         *static_cast<const operators::RecurrentOp*>(&forwardOp);
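For context on the two hunks above: when a forward variable feeds several operators, each consumer contributes a partial gradient, and the backward pass inserts "sum" ops to accumulate them into intermediates named `name + "@SHARED@" + idx`. The fix passes both operands through the single duplicable input "X" instead of two map entries with the same "X" key (of which only one survived). Below is a rough NumPy sketch of that accumulation loop, with plain arrays standing in for operator outputs; it is an illustration, not Paddle code.

```python
import numpy as np

def accumulate_duplicate_grads(name, dup_outputs):
    """Chain the inserted 'sum' ops over partial gradients of one variable."""
    dup_outputs = list(dup_outputs)
    for output_idx in range(len(dup_outputs) - 1):
        insert_add_x = dup_outputs[output_idx]
        insert_add_y = dup_outputs[output_idx + 1]
        # One inserted op: sum(X = [insert_add_x, insert_add_y]) -> Out, where
        # Out would be named "<name>@SHARED@<output_idx>" in the real graph.
        insert_add_out = insert_add_x + insert_add_y
        dup_outputs[output_idx + 1] = insert_add_out  # next step consumes it
    return dup_outputs[-1]

partials = [np.full(3, 1.0), np.full(3, 2.0), np.full(3, 3.0)]
print(accumulate_duplicate_grads("x@GRAD", partials))  # -> [6. 6. 6.]
```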
paddle/operators/sgd_op.cc
@@ -23,19 +23,22 @@ class SGDOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContextBase *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("param"),
-                   "Input(param) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("grad"),
-                   "Input(grad) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("learning_rate"),
-                   "Input(learning_rate) of SGDOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("param_out"),
-                   "Output(param_out) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Param"),
+                   "Input(Param) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Grad"),
+                   "Input(Grad) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
+                   "Input(LearningRate) of SGDOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
+                   "Output(ParamOut) of SGDOp should not be null.");
 
-    auto param_dim = ctx->GetInputDim("param");
-    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("grad"),
+    auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      "Learning rate should have 1 element");
+    auto param_dim = ctx->GetInputDim("Param");
+    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
                       "Two input of SGD Op's dimension must be same.");
-    ctx->SetOutputDim("param_out", param_dim);
+    ctx->SetOutputDim("ParamOut", param_dim);
   }
 };
@@ -43,10 +46,10 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("param", "input parameter");
-    AddInput("learning_rate", "learning rate of sgd");
-    AddInput("grad", "input gradient");
-    AddOutput("param_out", "output parameter");
+    AddInput("Param", "Input parameter");
+    AddInput("LearningRate", "Learning rate of SGD");
+    AddInput("Grad", "Input gradient");
+    AddOutput("ParamOut", "output parameter");
     AddComment(R"DOC(
 Simplest sgd algorithm.
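The hunks above rename SGDOp's inputs and output to the CamelCase convention (Param, Grad, LearningRate, ParamOut) and add a check that the learning rate is a single-element tensor. A hand-written Python analogue of the resulting InferShape checks, with a plain dict of NumPy arrays standing in for the InferShapeContextBase (illustration only, not the Paddle API):

```python
import numpy as np

def sgd_infer_shape(inputs):
    """Python analogue of the new SGDOp::InferShape checks."""
    for name in ("Param", "Grad", "LearningRate"):
        assert name in inputs, "Input(%s) of SGDOp should not be null." % name
    # New check: the learning rate must be a single-element tensor.
    assert inputs["LearningRate"].size == 1, "Learning rate should have 1 element"
    assert inputs["Param"].shape == inputs["Grad"].shape, (
        "Two input of SGD Op's dimension must be same.")
    return inputs["Param"].shape  # dimension assigned to Output(ParamOut)

shape = sgd_infer_shape({
    "Param": np.zeros((102, 105), dtype=np.float32),
    "Grad": np.zeros((102, 105), dtype=np.float32),
    "LearningRate": np.array([0.1], dtype=np.float32),
})
print(shape)  # (102, 105)
```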
paddle/operators/sgd_op.h
@@ -28,10 +28,10 @@ template <typename Place, typename T>
 class SGDOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
-    auto param = ctx.Input<Tensor>("param");
-    auto grad = ctx.Input<Tensor>("grad");
-    auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = *ctx.Input<float>("learning_rate");
+    auto param = ctx.Input<Tensor>("Param");
+    auto grad = ctx.Input<Tensor>("Grad");
+    auto param_out = ctx.Output<Tensor>("ParamOut");
+    float lr = ctx.Input<Tensor>("LearningRate")->data<float>()[0];
 
     param_out->mutable_data<T>(ctx.GetPlace());
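With this change the kernel reads the learning rate from the first element of the LearningRate tensor instead of dereferencing a raw float input; the update itself is plain SGD, param_out = param − lr · grad. A minimal NumPy sketch of the same computation (a sketch under those assumptions, not the real kernel):

```python
import numpy as np

def sgd_compute(param, grad, learning_rate):
    """NumPy analogue of SGDOpKernel::Compute."""
    lr = float(learning_rate[0])  # mirrors ->data<float>()[0]
    return param - lr * grad      # vanilla SGD update

param = np.ones((2, 3), dtype=np.float32)
grad = np.full((2, 3), 0.5, dtype=np.float32)
lr = np.array([0.1], dtype=np.float32)
print(sgd_compute(param, grad, lr))  # every entry 0.95
```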
python/paddle/v2/framework/tests/test_sgd_op.py
@@ -8,10 +8,10 @@ class TestSGDOp(OpTest):
         self.op_type = "sgd"
         w = np.random.random((102, 105)).astype("float32")
         g = np.random.random((102, 105)).astype("float32")
-        lr = 0.1
+        lr = np.array([0.1]).astype("float32")
 
-        self.inputs = {'param': w, 'grad': g, 'learning_rate': lr}
-        self.outputs = {'param_out': w - lr * g}
+        self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
+        self.outputs = {'ParamOut': w - lr * g}
 
     def test_check_output(self):
         self.check_output()
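The test now feeds the learning rate as a one-element float32 array so it can be passed as the LearningRate tensor; NumPy broadcasting keeps the expected-output expression `w - lr * g` unchanged. A quick standalone check of that equivalence (plain NumPy, independent of the OpTest harness):

```python
import numpy as np

w = np.random.random((102, 105)).astype("float32")
g = np.random.random((102, 105)).astype("float32")
lr = np.array([0.1]).astype("float32")  # one-element tensor, not a Python float

expected = w - lr * g                   # lr broadcasts like a scalar
assert np.allclose(expected, w - 0.1 * g)
```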