PaddlePaddle / Paddle
Commit d97a2b42
Authored Aug 08, 2017 by Yi Wang; committed by GitHub on Aug 08, 2017
Merge pull request #3 from reyoung/feature/refactorize_framework_proto
Step 1: Make code compile well.
Parents: 72e3ba50, dba618c0
Showing 35 changed files with 927 additions and 960 deletions (+927, −960)
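The thrust of this step is representational: an operator's inputs and outputs change from flat, position-indexed vectors (resolved through in_out_idxs_ and the "input_format"/"output_format" attributes) to maps from each OpProto argument name to its list of variable names. A minimal, self-contained sketch of that change — illustrative only, not code from this commit:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  // Old shape: order matters; argument names are resolved via index tables.
  std::vector<std::string> old_inputs{"x", "w", "b"};

  // New shape: arguments are addressed by name; a duplicable argument
  // simply carries more than one variable name in its list.
  std::unordered_map<std::string, std::vector<std::string>> new_inputs{
      {"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}};

  std::cout << old_inputs[1] << " " << new_inputs.at("W")[0] << "\n";  // "w w"
}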
.gitignore                                 +2   -1
paddle/framework/attribute.cc              +1   -1
paddle/framework/attribute.h               +2   -3
paddle/framework/backward.cc               +41  -24
paddle/framework/backward_test.cc          +224 -213
paddle/framework/ddim.cc                   +6   -0
paddle/framework/ddim.h                    +2   -0
paddle/framework/framework.proto           +3   -3
paddle/framework/grad_op_builder.cc        +4   -3
paddle/framework/grad_op_builder_test.cc   +10  -6
paddle/framework/op_registry.h             +33  -87
paddle/framework/op_registry_test.cc       +28  -8
paddle/framework/operator.cc               +44  -55
paddle/framework/operator.h                +10  -35
paddle/framework/operator_test.cc          +33  -33
paddle/framework/pybind.cc                 +4   -3
paddle/operators/add_op.cc                 +4   -9
paddle/operators/add_op.h                  +3   -3
paddle/operators/cross_entropy_op.cc       +7   -13
paddle/operators/cross_entropy_op.h        +1   -1
paddle/operators/fc_op.cc                  +8   -8
paddle/operators/fill_zeros_like_op.cc     +2   -10
paddle/operators/mean_op.cc                +3   -5
paddle/operators/mul_op.cc                 +3   -5
paddle/operators/net_op.cc                 +22  -18
paddle/operators/net_op.h                  +1   -2
paddle/operators/net_op_test.cc            +10  -9
paddle/operators/recurrent_op.cc           +8   -3
paddle/operators/recurrent_op_test.cc      +379 -370
paddle/operators/rowwise_add_op.cc         +4   -6
paddle/operators/rowwise_add_op.h          +2   -2
paddle/operators/sgd_op.cc                 +4   -8
paddle/operators/sigmoid_op.cc             +1   -3
paddle/operators/softmax_op.cc             +0   -8
paddle/platform/enforce.h                  +18  -2
.gitignore @ d97a2b42

@@ -24,4 +24,5 @@ cmake-build-*
 python/paddle/v2/framework/core.so
 CMakeFiles
 cmake_install.cmake
 paddle/.timestamp
+python/paddlepaddle.egg-info/
paddle/framework/attribute.cc @ d97a2b42

@@ -44,7 +44,7 @@ AttrType AttrTypeID<std::vector<std::string>>() {
   return STRINGS;
 }

-Attribute GetAttrValue(const AttrDesc& attr_desc) {
+Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
   switch (attr_desc.type()) {
     case paddle::framework::AttrType::INT: {
       return attr_desc.i();
paddle/framework/attribute.h @ d97a2b42

@@ -21,8 +21,7 @@ limitations under the License. */
 #include <unordered_set>
 #include <vector>

-#include "paddle/framework/attribute.pb.h"
-#include "paddle/framework/op_desc.pb.h"
+#include "paddle/framework/framework.pb.h"
 #include "paddle/platform/enforce.h"

 namespace paddle {

@@ -37,7 +36,7 @@ typedef std::unordered_map<std::string, Attribute> AttributeMap;
 template <typename T>
 AttrType AttrTypeID();

-Attribute GetAttrValue(const AttrDesc& attr_desc);
+Attribute GetAttrValue(const OpDesc::Attr& attr_desc);

 // check whether a value(attribute) fit a certain limit
 template <typename T>
paddle/framework/backward.cc @ d97a2b42

@@ -20,15 +20,24 @@
 namespace paddle {
 namespace framework {

-static bool AllInSet(const std::vector<std::string>& names,
-                     const std::string& suffix,
-                     const std::unordered_set<std::string>& set) {
-  for (auto& name : names) {
-    if (set.find(name + suffix) == set.end()) {
-      return false;
-    }
-  }
-  return true;
-}
+template <typename Map, typename T>
+static void ForEachVarName(Map& names, T callback) {
+  for (auto& name : names) {
+    for (auto& n : name.second) {
+      if (callback(n)) break;
+    }
+  }
+}
+
+static bool AllInSet(
+    const std::unordered_map<std::string, std::vector<std::string>>& names,
+    const std::string& suffix, const std::unordered_set<std::string>& set) {
+  bool ret_val = true;
+  ForEachVarName(names, [&ret_val, &set, &suffix](const std::string& n) {
+    ret_val = set.find(n + suffix) == set.end();
+    return !ret_val;
+  });
+  return ret_val;
+}

 static std::shared_ptr<OperatorBase> NOP() {

@@ -67,10 +76,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
   // Then all input gradients cannot be computed at all, and we put them into
   // `no_grad_names` set. Return an NOP.
   if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) {
-    for (auto& name : forwardOp.inputs_) {
-      // Mark all input is not need
-      no_grad_names.insert(name + kGradVarSuffix);
-    }
+    ForEachVarName(forwardOp.inputs_,
+                   [&no_grad_names](const std::string& name) -> bool {
+                     no_grad_names.insert(GradVarName(name));
+                     return false;
+                   });
     return NOP();
   }

@@ -92,9 +102,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
       auto fwd = *it;
       auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id);
       net->AddOp(bwd);
-      for (auto& out : bwd->outputs_) {
-        dup_output_ops[out].emplace_back(local_op_id);
-      }
+      ForEachVarName(bwd->outputs_,
+                     [&dup_output_ops, local_op_id](const std::string& out) {
+                       dup_output_ops[out].emplace_back(local_op_id);
+                       return false;
+                     });
     }
     // Get unique ID for this method.
     auto uid = uniq_id++;

@@ -116,7 +128,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
       insert_position.push_back(
          {dup_op.back(),
-          OpRegistry::CreateOp("add", {dup_outputs}, {name},
+          OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, {{"Out", {name}}},
                                {{"input_format",
                                  std::vector<int>{
                                      0, static_cast<int>(dup_outputs.size())}}})});
   }

@@ -130,7 +142,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
   } else {
     std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
-    for (std::string& grad_input : grad_op->inputs_) {
+    ForEachVarName(grad_op->inputs_, [&no_grad_names,
+                                      &net](std::string& grad_input) {
       if (no_grad_names.count(grad_input)) {
         std::string prefix =
             grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());

@@ -138,16 +152,19 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
         // If part of input gradient of that operator is not calculated, fill
         // zero variables to that input gradient.
-        net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix},
-                                        {grad_input}, {}));
+        net->AddOp(OpRegistry::CreateOp("fill_zeros_like",
+                                        {{"Src", {prefix}}},
+                                        {{"Dst", {grad_input}}}, {}));
       }
-    }
-
-    for (std::string& grad_output : grad_op->outputs_) {
-      if (no_grad_names.count(grad_output)) {
-        grad_output = kEmptyVarName;
-      }
-    }
+      return false;
+    });
+
+    ForEachVarName(grad_op->outputs_,
+                   [&no_grad_names](std::string& grad_output) {
+                     if (no_grad_names.count(grad_output)) {
+                       grad_output = kEmptyVarName;
+                     }
+                     return false;
+                   });

     if (net->ops_.empty()) {  // Current no aux op is added to network
       return grad_op;
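The new ForEachVarName helper visits every variable name in such a name-to-variables map; a callback returning true stops iteration within the current argument's list. A standalone sketch of the pattern with made-up variable names (iteration order over the map is unspecified):

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

template <typename Map, typename T>
static void ForEachVarName(Map& names, T callback) {
  for (auto& name : names) {
    for (auto& n : name.second) {
      if (callback(n)) break;  // early exit within one argument's list
    }
  }
}

int main() {
  std::unordered_map<std::string, std::vector<std::string>> outputs{
      {"Out", {"out1", "out2"}}, {"Tmp", {"tmp0"}}};
  // Print every variable name; returning false means "keep going".
  ForEachVarName(outputs, [](const std::string& n) {
    std::cout << n << "\n";
    return false;
  });
}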
paddle/framework/backward_test.cc @ d97a2b42

@@ -44,8 +44,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker {
  public:
   MulOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("A", "A");
-    AddInput("B", "B");
+    AddInput("X", "A");
+    AddInput("Y", "B");
     AddOutput("Out", "Out");
     AddComment("Mul");
   }

@@ -56,7 +56,7 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
   SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "X");
-    AddOutput("Y", "Y");
+    AddOutput("Out", "Y");
     AddComment("Sigmoid");
   }
 };

@@ -66,7 +66,7 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
   NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "X input");
-    AddOutput("Y", "Y output");
+    AddOutput("Out", "Y output");
     AddComment("NoGradOp, same input output. no Grad");
   }
 };

@@ -74,13 +74,15 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
 class FcOp : public ops::NetOp {
  public:
   void Init() override {
-    AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")},
-                               {Output("mul_result")}, {}));
+    AddOp(OpRegistry::CreateOp("mul",
+                               {{"X", {Input("X")}}, {"Y", {Input("W")}}},
+                               {{"Out", {Output("mul_result")}}}, {}));
     auto b_name = Input("b");
     std::string before_act = "mul_result";
     if (b_name != kEmptyVarName) {
-      AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_result"), b_name},
-                                 {Output("add_result")}, {}));
+      AddOp(OpRegistry::CreateOp(
+          "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {b_name}}},
+          {{"Out", {Output("add_result")}}}, {}));
       before_act = "add_result";
     } else {
       auto out_varname = Output("add_result");

@@ -89,8 +91,8 @@ class FcOp : public ops::NetOp {
       }
     }

-    AddOp(OpRegistry::CreateOp("sigmoid", {Output(before_act)},
-                               {Output("Out")}, {}));
+    AddOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}},
+                               {{"Out", {Output("Out")}}}, {}));
     CompleteAddOp(false);
   }
 };

@@ -158,206 +160,215 @@ REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
 REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
 REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);

-TEST(Backward, simple_op_grad) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  ASSERT_NE(fwd, nullptr);
-  auto gop = f::OpRegistry::CreateGradOp(*fwd);
-  ASSERT_EQ(4UL, gop->inputs_.size());
-  ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
-  ASSERT_EQ("rowwise_add_grad", gop->type_);
-  ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
-  ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);
-
-  ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
-}
-
-TEST(Backward, simple_op_not_need_grad) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  ASSERT_NE(fwd, nullptr);
-  auto gop = f::Backward(*fwd, {"X"});
-  ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
-                      "X" + f::kGradVarSuffix),
-            gop->outputs_.end());
-
-  auto no_input_gop = f::Backward(*fwd, {"X", "b"});
-  ASSERT_NE(no_input_gop, nullptr);
-  ASSERT_TRUE(no_input_gop->IsNetOp());
-  ASSERT_EQ(0UL,
-            std::static_pointer_cast<ops::NetOp>(no_input_gop)->ops_.size());
-}
-
-TEST(Backward, net_fc_backward_normal) {
-  std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp(
-      "fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {});
-  ASSERT_NE(fwd, nullptr);
-  std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
-  ASSERT_TRUE(gop->IsNetOp());
-  auto net = static_cast<ops::NetOp *>(gop.get());
-
-  ASSERT_NO_THROW(net->DebugString());
-
-  ASSERT_EQ(3UL, net->ops_.size());
-
-  f::OperatorBase &d_sigmoid = *net->ops_[0];
-  ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
-
-  f::OperatorBase &d_add = *net->ops_[1];
-  ASSERT_EQ("rowwise_add_grad", d_add.type_);
-
-  f::OperatorBase &d_mul = *net->ops_[2];
-  ASSERT_EQ("mul_grad", d_mul.type_);
-}
-
-TEST(Backward, net_fc_backward_not_have_b) {
-  std::shared_ptr<f::OperatorBase> fwd =
-      f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName},
-                              {"mul_result", "add_result", "tmp"}, {});
-  ASSERT_NE(fwd, nullptr);
-  std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
-  ASSERT_TRUE(gop->IsNetOp());
-  auto net = static_cast<ops::NetOp *>(gop.get());
-
-  ASSERT_NO_THROW(net->DebugString());
-
-  ASSERT_EQ(2UL, net->ops_.size());
-
-  f::OperatorBase &d_sigmoid = *net->ops_[0];
-  ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
-
-  f::OperatorBase &d_mul = *net->ops_[1];
-  ASSERT_EQ("mul_grad", d_mul.type_);
-}
-
-TEST(Backward, net_input_of_network_not_need_grad) {
-  ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"},
-                                    {"mul_tmp_0", "add_tmp_0", "hidden0"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"},
-                                    {"mul_tmp_1", "add_tmp_1", "hidden1"}, {}));
-  net.CompleteAddOp();
-  auto bwd = Backward(net, {"X"});  // X@GRAD is not need.
-  ASSERT_TRUE(bwd->IsNetOp());
-  auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
-
-  std::unordered_set<std::string> all_output = std::unordered_set<std::string>(
-      bwd_net->outputs_.begin(), bwd_net->outputs_.end());
-  all_output.erase(f::kEmptyVarName);
-
-  for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
-    ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
-  }
-
-  // Not Generated X
-  ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());
-
-  ASSERT_EQ(2UL, bwd_net->ops_.size());
-  ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
-  auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
-  ASSERT_EQ(3UL, first_fc_grad->ops_.size());
-  ASSERT_EQ(f::kEmptyVarName,
-            first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
-}
-
-TEST(Backward, net_shared_weight) {
-  ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {}));
-  net.CompleteAddOp();
-
-  auto bwd = f::Backward(net, {});
-  ASSERT_TRUE(bwd->IsNetOp());
-  auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
-  ASSERT_EQ(3UL, bwd_net->ops_.size());
-  ASSERT_EQ("add", bwd_net->ops_[2]->type_);
-}
-
-TEST(Backward, op_register_grad_not_for_network) {
-  auto fwd =
-      f::OpRegistry::CreateOp("fc", {"X", "W", "b"},
-                              {"mul_out", "add_out", "out1"},
-                              {{"temporary_index", std::vector<int>{0, 1}}});
-
-  ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet);
-}
-
-TEST(Backward, op_all_input_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  auto backward = f::Backward(*fwd, {"X", "b"});
-  ASSERT_TRUE(backward->IsNetOp());
-  auto net = static_cast<ops::NetOp *>(backward.get());
-  ASSERT_TRUE(net->ops_.empty());
-}
-
-TEST(Backward, op_all_output_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
-  auto backward = f::Backward(*fwd, {"Out"});
-  ASSERT_TRUE(backward->IsNetOp());
-  auto net = static_cast<ops::NetOp *>(backward.get());
-  ASSERT_TRUE(net->ops_.empty());
-}
-
-TEST(Backward, op_part_of_output_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {});
-  auto backward = f::Backward(*fwd, {"Z"});
-  ASSERT_TRUE(backward->IsNetOp());
-  auto net = static_cast<ops::NetOp *>(backward.get());
-  ASSERT_EQ(net->ops_.size(), 2UL);
-
-  auto &fill_zero = *net->ops_[0];
-  ASSERT_EQ("fill_zeros_like", fill_zero.type_);
-  ASSERT_EQ(1UL, fill_zero.inputs_.size());
-  ASSERT_EQ("Z", fill_zero.inputs_[0]);
-  ASSERT_EQ(1UL, fill_zero.outputs_.size());
-  ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);
-
-  auto &d_many_out = *net->ops_[1];
-  ASSERT_EQ("many_output_op_grad", d_many_out.type_);
-  ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size());  // I/O/OG
-  ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" + f::kGradVarSuffix));
-  ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" + f::kGradVarSuffix));
-  ASSERT_EQ("X" + f::kGradVarSuffix,
-            d_many_out.Output("x" + f::kGradVarSuffix));
-}
-
-TEST(Backward, op_part_of_input_are_not_need) {
-  auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
-  auto backward = f::Backward(*fwd, {"a"});
-  auto &grad_mul = *backward;
-  ASSERT_EQ(grad_mul.type_, "mul_grad");
-  ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
-  ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
-  ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
-  ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" + f::kGradVarSuffix);
-  ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
-            "out" + f::kGradVarSuffix);
-  ASSERT_EQ(grad_mul.Input("A"), "a");
-  ASSERT_EQ(grad_mul.Input("B"), "b");
-  ASSERT_EQ(grad_mul.Input("Out"), "out");
-}
-
-TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
-  ops::NetOp net;
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"},
-                                    {"mul_out1", "add_out1", "out1"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"},
-                                    {"mul_out2", "tmp_out2", "out2"}, {}));
-  net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"},
-                                    {"mul_out3", "tmp_out3", "out3"}, {}));
-  net.CompleteAddOp();
-  auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"});
-  ASSERT_TRUE(backward->IsNetOp());
-  auto bwd_net = static_cast<ops::NetOp *>(backward.get());
-  ASSERT_EQ(bwd_net->ops_.size(), 3UL);
-  auto &grad_fc = *bwd_net->ops_[0];
-  EXPECT_EQ(grad_fc.inputs_.size(),
-            3UL       /* external input number */
-                + 1UL /* external output number*/
-                + 1UL /* number of gradient of external output*/
-                + 2U /* internal variable number*/);
-  EXPECT_EQ(grad_fc.outputs_.size(), 2UL       /* input number of mul*/
-                                         + 2UL /* input number of rowwise_add */
-                                         + 1UL /* input number of sigmod */);
-  EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
-  EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
-}
//
// TEST(Backward, simple_op_grad) {
// auto fwd = f::OpRegistry::CreateOp(
// "rowwise_add", {{"X", {"X"}}, {"b", {"b"}}}, {{"Out", {"Out"}}}, {});
// ASSERT_NE(fwd, nullptr);
// auto gop = f::OpRegistry::CreateGradOp(*fwd);
// ASSERT_EQ(4UL, gop->inputs_.size());
// ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
// ASSERT_EQ("rowwise_add_grad", gop->type_);
// ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
// ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);
//
// ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
//}
//
// TEST(Backward, simple_op_not_need_grad) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// ASSERT_NE(fwd, nullptr);
// auto gop = f::Backward(*fwd, {"X"});
// ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
// "X" + f::kGradVarSuffix),
// gop->outputs_.end());
//
// auto no_input_gop = f::Backward(*fwd, {"X", "b"});
// ASSERT_NE(no_input_gop, nullptr);
// ASSERT_TRUE(no_input_gop->IsNetOp());
// ASSERT_EQ(0UL,
// std::static_pointer_cast<ops::NetOp>(no_input_gop)->ops_.size());
//}
//
// TEST(Backward, net_fc_backward_normal) {
// std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp(
// "fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {});
// ASSERT_NE(fwd, nullptr);
// std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
// ASSERT_TRUE(gop->IsNetOp());
// auto net = static_cast<ops::NetOp *>(gop.get());
//
// ASSERT_NO_THROW(net->DebugString());
//
// ASSERT_EQ(3UL, net->ops_.size());
//
// f::OperatorBase &d_sigmoid = *net->ops_[0];
// ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
//
// f::OperatorBase &d_add = *net->ops_[1];
// ASSERT_EQ("rowwise_add_grad", d_add.type_);
//
// f::OperatorBase &d_mul = *net->ops_[2];
// ASSERT_EQ("mul_grad", d_mul.type_);
//}
//
// TEST(Backward, net_fc_backward_not_have_b) {
// std::shared_ptr<f::OperatorBase> fwd =
// f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName},
// {"mul_result", "add_result", "tmp"}, {});
// ASSERT_NE(fwd, nullptr);
// std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
// ASSERT_TRUE(gop->IsNetOp());
// auto net = static_cast<ops::NetOp *>(gop.get());
//
// ASSERT_NO_THROW(net->DebugString());
//
// ASSERT_EQ(2UL, net->ops_.size());
//
// f::OperatorBase &d_sigmoid = *net->ops_[0];
// ASSERT_EQ("sigmoid_grad", d_sigmoid.type_);
//
// f::OperatorBase &d_mul = *net->ops_[1];
// ASSERT_EQ("mul_grad", d_mul.type_);
//}
//
// TEST(Backward, net_input_of_network_not_need_grad) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"},
// {"mul_tmp_0", "add_tmp_0", "hidden0"},
// {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"},
// {"mul_tmp_1", "add_tmp_1", "hidden1"},
// {}));
// net.CompleteAddOp();
// auto bwd = Backward(net, {"X"}); // X@GRAD is not need.
// ASSERT_TRUE(bwd->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
//
// std::unordered_set<std::string> all_output =
// std::unordered_set<std::string>(
// bwd_net->outputs_.begin(), bwd_net->outputs_.end());
// all_output.erase(f::kEmptyVarName);
//
// for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
// ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
// }
//
// // Not Generated X
// ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());
//
// ASSERT_EQ(2UL, bwd_net->ops_.size());
// ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
// auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
// ASSERT_EQ(3UL, first_fc_grad->ops_.size());
// ASSERT_EQ(f::kEmptyVarName,
// first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
//}
//
// TEST(Backward, net_shared_weight) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {}));
// net.CompleteAddOp();
//
// auto bwd = f::Backward(net, {});
// ASSERT_TRUE(bwd->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(bwd.get());
// ASSERT_EQ(3UL, bwd_net->ops_.size());
// ASSERT_EQ("add", bwd_net->ops_[2]->type_);
//}
//
// TEST(Backward, op_register_grad_not_for_network) {
// auto fwd = f::OpRegistry::CreateOp(
// "fc", {"X", "W", "b"}, {"mul_out", "add_out", "out1"},
// {{"temporary_index", std::vector<int>{0, 1}}});
//
// ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet);
//}
//
// TEST(Backward, op_all_input_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// auto backward = f::Backward(*fwd, {"X", "b"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_TRUE(net->ops_.empty());
//}
//
// TEST(Backward, op_all_output_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
// auto backward = f::Backward(*fwd, {"Out"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_TRUE(net->ops_.empty());
//}
//
// TEST(Backward, op_part_of_output_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {});
// auto backward = f::Backward(*fwd, {"Z"});
// ASSERT_TRUE(backward->IsNetOp());
// auto net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_EQ(net->ops_.size(), 2UL);
//
// auto &fill_zero = *net->ops_[0];
// ASSERT_EQ("fill_zeros_like", fill_zero.type_);
// ASSERT_EQ(1UL, fill_zero.inputs_.size());
// ASSERT_EQ("Z", fill_zero.inputs_[0]);
// ASSERT_EQ(1UL, fill_zero.outputs_.size());
// ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);
//
// auto &d_many_out = *net->ops_[1];
// ASSERT_EQ("many_output_op_grad", d_many_out.type_);
// ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size()); // I/O/OG
// ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" +
// f::kGradVarSuffix));
// ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" +
// f::kGradVarSuffix));
// ASSERT_EQ("X" + f::kGradVarSuffix,
// d_many_out.Output("x" + f::kGradVarSuffix));
//}
//
// TEST(Backward, op_part_of_input_are_not_need) {
// auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
// auto backward = f::Backward(*fwd, {"a"});
// auto &grad_mul = *backward;
// ASSERT_EQ(grad_mul.type_, "mul_grad");
// ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
// ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
// ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
// ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" +
// f::kGradVarSuffix);
// ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
// "out" + f::kGradVarSuffix);
// ASSERT_EQ(grad_mul.Input("A"), "a");
// ASSERT_EQ(grad_mul.Input("B"), "b");
// ASSERT_EQ(grad_mul.Input("Out"), "out");
//}
//
// TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
// ops::NetOp net;
// net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"},
// {"mul_out1", "add_out1", "out1"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"},
// {"mul_out2", "tmp_out2", "out2"}, {}));
// net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"},
// {"mul_out3", "tmp_out3", "out3"}, {}));
// net.CompleteAddOp();
// auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"});
// ASSERT_TRUE(backward->IsNetOp());
// auto bwd_net = static_cast<ops::NetOp *>(backward.get());
// ASSERT_EQ(bwd_net->ops_.size(), 3UL);
// auto &grad_fc = *bwd_net->ops_[0];
// EXPECT_EQ(grad_fc.inputs_.size(),
// 3UL /* external input number */
// + 1UL /* external output number*/
// + 1UL /* number of gradient of external output*/
// + 2U /* internal variable number*/);
// EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/
// + 2UL /* input number of rowwise_add
// */
// + 1UL /* input number of sigmod */);
// EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
// EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
//}
paddle/framework/ddim.cc @ d97a2b42

@@ -284,5 +284,11 @@ DDim::DDim(std::initializer_list<int> init_list) {
   *this = make_ddim(init_list);
 }

+std::string DDim::DebugString() const {
+  std::ostringstream ss;
+  ss << *this;
+  return ss.str();
+}
+
 }  // namespace framework
 }  // namespace paddle
paddle/framework/ddim.h @ d97a2b42

@@ -73,6 +73,8 @@ struct DDim {
   DDim operator*(DDim d) const;

   ssize_t size() const;
+
+  std::string DebugString() const;
 };

 /**
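The new DDim::DebugString simply routes through the existing operator<<. A usage sketch, assuming the initializer-list constructor shown above:

#include <iostream>
#include "paddle/framework/ddim.h"

int main() {
  paddle::framework::DDim dim({2, 3, 4});
  // Prints the same text operator<< would stream for this DDim.
  std::cout << dim.DebugString() << "\n";
}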
paddle/framework/framework.proto @ d97a2b42

@@ -40,8 +40,8 @@ message OpDesc {
   };

   message Var {
-    required string name;                  // e.g. "X"
-    optional int dup = 2 [ default = 0 ];  // e.g., "1"
+    required string op_proto_name = 1;
+    repeated string var_names = 2;
   };

   required string type = 3;

@@ -57,7 +57,7 @@ message OpProto {
   message Var {
     required string name = 1;
     required string comment = 2;
+    // OpDesc::Var::dup indices the duplica.
     optional bool duplicable = 3 [ default = false ];
     optional bool intermediate = 4 [ default = false ];
     optional bool no_gradient = 5 [ default = false ];
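With the new OpDesc::Var message, a serialized op no longer carries bare variable-name strings: each inputs/outputs entry pairs an OpProto argument name (op_proto_name) with the variable names bound to it (var_names). A sketch of filling one in; the calls mirror op_registry_test.cc further down this diff:

#include "paddle/framework/framework.pb.h"  // generated from this proto

paddle::framework::OpDesc MakeCosSimDesc() {
  paddle::framework::OpDesc op_desc;
  op_desc.set_type("cos_sim");

  // Each entry names an OpProto argument and lists its bound variables.
  auto* input = op_desc.add_inputs();
  input->set_op_proto_name("input");
  *input->mutable_var_names()->Add() = "aa";

  auto* output = op_desc.add_outputs();
  output->set_op_proto_name("output");
  *output->mutable_var_names()->Add() = "bb";
  return op_desc;
}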
paddle/framework/grad_op_builder.cc @ d97a2b42

@@ -13,12 +13,12 @@ express or implied. See the License for the specific language governing
 permissions and limitations under the License. */

 #include "paddle/framework/grad_op_builder.h"
-#include "paddle/framework/op_proto.pb.h"
+#include "paddle/framework/framework.pb.h"
 #include "paddle/framework/op_registry.h"

 namespace paddle {
 namespace framework {

+/**
 class OpRegistry;

 using VarIndexMap = std::unordered_map<std::string, int>;

@@ -98,6 +98,7 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
   TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, out_idx, true);  // IG
   return grad_op;
 }
+**/
+
+OperatorBase* BuildGradOp(const OperatorBase* op) { return nullptr; }

 }  // namespace framework
 }  // namespace paddle
paddle/framework/grad_op_builder_test.cc @ d97a2b42

@@ -47,8 +47,8 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;

 TEST(GradOpBuilder, AddTwo) {
-  std::shared_ptr<f::OperatorBase> add_op(
-      f::OpRegistry::CreateOp("add_two", {"x", "y"}, {"out"}, {}));
+  std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
+      "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
   EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4);

@@ -70,8 +70,10 @@ TEST(GradOpBuilder, MutiInOut) {
   f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
                         {"output_format", std::vector<int>{0, 1, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "mult_io", {"in1", "in2_1", "in2_2", "in2_3", "in3"},
-      {"out1", "out2_1", "out2_2"}, attrs));
+      "mult_io",
+      {{"In1", {"in1"}},
+       {"In2_mult", {"in2_1", "in2_2", "in2_3"}},
+       {"In3", {"in3"}}},
+      {{"Out1", {"Out2_mult"}}, {"Out2", {"out2_1", "out2_2"}}}, attrs));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);

@@ -104,8 +106,10 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
                         {"output_format", std::vector<int>{0, 2, 3}}};
   std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
-      "io_ignored", {"in1", "in2_1", "in2_2", "in3_1", "in3_2"},
-      {"out1_1", "out1_2", "out2"}, attrs));
+      "io_ignored",
+      {{"In1", {"in1"}},
+       {"In2_mult", {"in2_1", "in2_2"}},
+       {"In3_mult", {"in3_1", "in3_2"}}},
+      {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, attrs));
   std::shared_ptr<f::OperatorBase> grad_test_op =
       f::OpRegistry::CreateGradOp(*test_op);
paddle/framework/op_registry.h @ d97a2b42

@@ -20,8 +20,8 @@ limitations under the License. */
 #include <unordered_map>
 #include <unordered_set>
 #include "paddle/framework/attribute.h"
+#include "paddle/framework/framework.pb.h"
 #include "paddle/framework/grad_op_builder.h"
-#include "paddle/framework/op_desc.pb.h"
 #include "paddle/framework/scope.h"

 namespace paddle {

@@ -44,25 +44,20 @@ class OpProtoAndCheckerMaker {
  protected:
   struct VariableBuilder {
-    VarProto* var_;
-    std::function<void()> on_multiple_;
-    std::function<void()> on_temporary_;
+    OpProto::Var* var_;

     VariableBuilder& SetMultiple() {
-      var_->set_multiple(true);
-      on_multiple_();
+      var_->set_duplicable(true);
       return *this;
     }

     VariableBuilder& SetTemporary() {
-      PADDLE_ENFORCE(bool(on_temporary_), "Cannot set temporary");
-      var_->set_temporary(true);
-      on_temporary_();
+      var_->set_intermediate(true);
       return *this;
     }

     VariableBuilder& IgnoreGradient() {
-      var_->set_ignore_gradient(true);
+      var_->set_no_gradient(true);
       return *this;
     }
   };

@@ -72,8 +67,7 @@ class OpProtoAndCheckerMaker {
     auto input = proto_->mutable_inputs()->Add();
     *input->mutable_name() = name;
     *input->mutable_comment() = comment;
-    return VariableBuilder{input, [=] { this->SetHasMultipleInput(); },
-                           nullptr};
+    return VariableBuilder{input};
   }

   VariableBuilder AddOutput(const std::string& name,

@@ -81,8 +75,7 @@ class OpProtoAndCheckerMaker {
     auto output = proto_->mutable_outputs()->Add();
     *output->mutable_name() = name;
     *output->mutable_comment() = comment;
-    return VariableBuilder{output, [=] { this->SetHasMultipleOutput(); },
-                           [=] { this->SetHasTemporaryOutput(); }};
+    return VariableBuilder{output};
   }

   template <typename T>

@@ -102,53 +95,6 @@ class OpProtoAndCheckerMaker {
   }

  private:
-  void SetHasMultiple(const std::string& in_out, bool* flag) {
-    if (!*flag) {
-      AddAttr<std::vector<int>>(in_out + "_format",
-                                "The multiple index of " + in_out +
-                                    "\n"
-                                    R"DOC(
-This attribute is used by Paddle core framework. Paddle's Op support each input
-or output could be a list of variable. This attribute is used to show how that
-list organized.
-
-e.g.
-  input = ["a", "b", "c", "d", "e", "f"]
-  input_format = [0, 4, 5, 6]
-
-means
-  The number of all input variables this op is six, and they are segmented into
-  three inputs.
-
-  The first input is input[0:4], second is input[4:5], third is input[5:6].
-)DOC",
-                                /*generated*/ true);
-      *flag = true;
-    }
-  }
-
-  void SetHasMultipleInput() { SetHasMultiple("input", &has_multiple_input_); }
-  void SetHasMultipleOutput() {
-    SetHasMultiple("output", &has_multiple_output_);
-  }
-
-  void SetHasTemporaryOutput() {
-    if (!has_temporary_output_) {
-      AddAttr<std::vector<int>>("temporary_index",
-                                R"DOC(The temporary index of output.
-
-Not all output of Paddle Op is used by user. For faster computation, each op
-could output some its internal state to other op, other op could take that
-output to make compute faster.
-
-Add a mark to which output is temporary is helpful for future optimization.
-)DOC",
-                                /*generated*/ true)
-          .SetDefault(std::vector<int>());
-      has_temporary_output_ = true;
-    }
-  }
-
   void CheckNoDuplicatedInOutAttrs() {
     std::unordered_set<std::string> names;
     auto checker = [&](const std::string& name) {

@@ -169,15 +115,12 @@ class OpProtoAndCheckerMaker {
   OpProto* proto_;
   OpAttrChecker* op_checker_;
   bool validated_{false};
-  bool has_multiple_input_{false};
-  bool has_multiple_output_{false};
-  bool has_temporary_output_{false};
 };

 class OpRegistry {
   using OpCreator = std::function<OperatorBase*()>;
   using VarIndexMap = std::unordered_map<std::string, int>;
-  using VarNameList = std::vector<std::string>;
+  using VarNameMap = std::unordered_map<std::string, std::vector<std::string>>;

  public:
   template <typename OpType, typename ProtoMakerType>

@@ -213,8 +156,8 @@ class OpRegistry {
   }

   static std::shared_ptr<OperatorBase> CreateOp(const std::string& type,
-                                                const VarNameList& inputs,
-                                                const VarNameList& outputs,
+                                                const VarNameMap& inputs,
+                                                const VarNameMap& outputs,
                                                 const AttributeMap& attrs) {
     auto op_create_it = op_creators().find(type);
     PADDLE_ENFORCE(op_create_it != op_creators().end(),

@@ -230,27 +173,28 @@ class OpRegistry {
     GenerateTempVariableName(op);
     {
       auto var_index_it = VarIndexMaps().find(type);
       if (var_index_it != VarIndexMaps().end()) {
         op->in_out_idxs_ = var_index_it->second;
       }
     }
     op->Init();
     return std::shared_ptr<OperatorBase>(op);
   }

   static std::shared_ptr<OperatorBase> CreateOp(const OpDesc& op_desc) {
-    std::vector<std::string> inputs;
-    inputs.reserve((size_t)op_desc.inputs_size());
-    std::copy(op_desc.inputs().begin(), op_desc.inputs().end(),
-              std::back_inserter(inputs));
+    VarNameMap inputs;
+    for (auto& input : op_desc.inputs()) {
+      auto& var_names = inputs[input.op_proto_name()];
+      auto& var_names_in_proto = input.var_names();
+      var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
+      std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
+                std::back_inserter(var_names));
+    }

-    std::vector<std::string> outputs;
-    outputs.reserve((size_t)op_desc.outputs_size());
-    std::copy(op_desc.outputs().begin(), op_desc.outputs().end(),
-              std::back_inserter(outputs));
+    VarNameMap outputs;
+    for (auto& output : op_desc.outputs()) {
+      auto& var_names = outputs[output.op_proto_name()];
+      auto& var_names_in_proto = output.var_names();
+      var_names.reserve(static_cast<size_t>(var_names_in_proto.size()));
+      std::copy(var_names_in_proto.begin(), var_names_in_proto.end(),
+                std::back_inserter(var_names));
+    }

     AttributeMap attrs;
     for (auto& attr : op_desc.attrs()) {

@@ -303,11 +247,13 @@ class OpRegistry {
   static void GenerateTempVariableName(OperatorBase* op) {
     static std::atomic<size_t> gUniqId(0UL);
-    for (auto& outname : op->outputs_) {
-      if (outname == kTempVarName) {
-        outname += op->type_;
-        outname += "@";
-        outname += std::to_string(gUniqId.fetch_add(1));
+    for (auto& output : op->outputs_) {
+      for (auto& output_name : output.second) {
+        if (output_name == kTempVarName) {
+          output_name += op->type_;
+          output_name += "@";
+          output_name += std::to_string(gUniqId.fetch_add(1));
+        }
       }
     }
   }
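Call sites of the string-based CreateOp change accordingly: positional name lists become maps keyed by OpProto argument names. A sketch using the mul operator's argument names as they appear in this commit's tests (it assumes mul is registered, e.g. by linking the operators library):

#include "paddle/framework/op_registry.h"

std::shared_ptr<paddle::framework::OperatorBase> MakeMul() {
  // Old form was positional: CreateOp("mul", {"a", "b"}, {"out"}, {});
  // New form nests braces as {argument name, {variable names...}}.
  return paddle::framework::OpRegistry::CreateOp(
      "mul", {{"X", {"a"}}, {"Y", {"b"}}}, {{"Out", {"out"}}}, {});
}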
paddle/framework/op_registry_test.cc @ d97a2b42

@@ -57,8 +57,13 @@ REGISTER_OP(my_test_op, paddle::framework::MyTestOp,
 TEST(OpRegistry, CreateOp) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("cos_sim");
-  op_desc.add_inputs("aa");
-  op_desc.add_outputs("bb");
+  auto input = op_desc.add_inputs();
+  input->set_op_proto_name("input");
+  *input->mutable_var_names()->Add() = "aa";
+
+  auto output = op_desc.add_outputs();
+  output->set_op_proto_name("output");
+  *output->mutable_var_names()->Add() = "bb";

   float scale = 3.3;
   auto attr = op_desc.mutable_attrs()->Add();

@@ -78,8 +83,13 @@ TEST(OpRegistry, CreateOp) {
 TEST(OpRegistry, IllegalAttr) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("cos_sim");
-  op_desc.add_inputs("aa");
-  op_desc.add_outputs("bb");
+  auto input = op_desc.add_inputs();
+  input->set_op_proto_name("input");
+  *input->mutable_var_names()->Add() = "aa";
+
+  auto output = op_desc.add_outputs();
+  output->set_op_proto_name("output");
+  *output->mutable_var_names()->Add() = "bb";

   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");

@@ -103,8 +113,13 @@ TEST(OpRegistry, IllegalAttr) {
 TEST(OpRegistry, DefaultValue) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("cos_sim");
-  op_desc.add_inputs("aa");
-  op_desc.add_outputs("bb");
+  auto input = op_desc.add_inputs();
+  input->set_op_proto_name("input");
+  *input->mutable_var_names()->Add() = "aa";
+
+  auto output = op_desc.add_outputs();
+  output->set_op_proto_name("output");
+  *output->mutable_var_names()->Add() = "bb";

   ASSERT_TRUE(op_desc.IsInitialized());

@@ -127,8 +142,13 @@ static void SetInputFormat(paddle::framework::OpDesc* desc) {
 TEST(OpRegistry, CustomChecker) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("my_test_op");
-  op_desc.add_inputs("ii");
-  op_desc.add_outputs("oo");
+  auto input = op_desc.add_inputs();
+  input->set_op_proto_name("input");
+  *input->mutable_var_names()->Add() = "ii";
+
+  auto output = op_desc.add_outputs();
+  output->set_op_proto_name("output");
+  *output->mutable_var_names()->Add() = "oo";
   SetInputFormat(&op_desc);

   // attr 'test_attr' is not set
paddle/framework/operator.cc @ d97a2b42

@@ -34,83 +34,72 @@ ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
 #endif

 const std::string& OperatorBase::Input(const std::string& name) const {
-  PADDLE_ENFORCE(in_out_idxs_ != nullptr,
-                 "Input Output Indices could not be nullptr");
-  auto it = in_out_idxs_->find(name);
-  PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_",
+  auto it = inputs_.find(name);
+  PADDLE_ENFORCE(it != inputs_.end(), "Op %s does not have output %s", type_,
                  name);
-  if (attrs_.count("input_format") == 0) {
-    return inputs_.at((size_t)it->second);
-  } else {
-    const auto& input_format = GetAttr<std::vector<int>>("input_format");
-    int idx = input_format[it->second];
-    return inputs_.at((size_t)idx);
-  }
+  PADDLE_ENFORCE_EQ(it->second.size(), 1UL,
+                    "Op %s input %s should contain only one variable", type_,
+                    name);
+  return it->second[0];
 }

-std::vector<std::string> OperatorBase::Inputs(const std::string& name) const {
-  PADDLE_ENFORCE(in_out_idxs_ != nullptr, "IO Idx could not be nullptr");
-  auto input_format = GetAttr<std::vector<int>>("input_format");
-  auto offset = in_out_idxs_->at(name);
-  PADDLE_ENFORCE(input_format.at(static_cast<size_t>(offset) + 1) <=
-                     static_cast<int>(inputs_.size()),
-                 "Input Out Of Range");
-  return std::vector<std::string>{
-      inputs_.begin() + input_format.at(offset),
-      inputs_.begin() + input_format.at(offset + 1)};
+const std::vector<std::string>& OperatorBase::Inputs(
+    const std::string& name) const {
+  return inputs_.at(name);
 }

 const std::string& OperatorBase::Output(const std::string& name) const {
-  PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr");
-  auto it = in_out_idxs_->find(name);
-  PADDLE_ENFORCE(it != in_out_idxs_->end(), "no key [%s] in in_out_idxs_",
+  auto it = outputs_.find(name);
+  PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output %s", type_,
                  name);
-  if (attrs_.count("output_format") == 0) {
-    return outputs_.at((size_t)it->second);
-  } else {
-    const auto& output_format = GetAttr<std::vector<int>>("output_format");
-    int idx = output_format[it->second];
-    return outputs_.at((size_t)idx);
-  }
+  PADDLE_ENFORCE_EQ(it->second.size(), 1UL,
+                    "Op %s input %s should contain only one variable", type_,
+                    name);
+  return it->second[0];
 }

-std::vector<std::string> OperatorBase::Outputs(const std::string& name) const {
-  PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr");
-  auto output_format = GetAttr<std::vector<int>>("output_format");
-  auto offset = in_out_idxs_->at(name);
-  PADDLE_ENFORCE(output_format.at(static_cast<size_t>(offset) + 1) <=
-                     static_cast<int>(outputs_.size()),
-                 "Output Out of Range");
-  return std::vector<std::string>{
-      outputs_.begin() + output_format.at(offset),
-      outputs_.begin() + output_format.at(offset + 1)};
+const std::vector<std::string>& OperatorBase::Outputs(
+    const std::string& name) const {
+  return outputs_.at(name);
 }

 std::string OperatorBase::DebugString() const {
   std::stringstream ss;
-  ss << "Op(" << type_ << "), inputs:(";
-  for (size_t i = 0; i < inputs_.size(); ++i) {
-    ss << inputs_[i];
-    if (i != inputs_.size() - 1) {
-      ss << ", ";
+  ss << "Op(" << type_ << "), inputs:{";
+  for (auto& input : inputs_) {
+    ss << input.first << "[";
+    for (size_t i = 0; i < input.second.size(); ++i) {
+      ss << input.second[i];
+      if (i != input.second.size() - 1) {
+        ss << ", ";
+      }
     }
+    ss << "]";
   }
-  ss << "), outputs:(";
-  for (size_t i = 0; i < outputs_.size(); ++i) {
-    ss << outputs_[i];
-    if (i != outputs_.size() - 1) {
-      ss << ", ";
+  ss << "}, outputs:{";
+  for (auto& output : outputs_) {
+    ss << output.first << "[";
+    for (size_t i = 0; i < output.second.size(); ++i) {
+      ss << output.second[i];
+      if (i != output.second.size() - 1) {
+        ss << ", ";
+      }
     }
+    ss << "]";
   }
-  ss << ").";
+  ss << "}.";
   return ss.str();
 }

 void OperatorBase::Rename(const std::string& old_name,
                           const std::string& new_name) {
-  std::replace(inputs_.begin(), inputs_.end(), old_name, new_name);
-  std::replace(outputs_.begin(), outputs_.end(), old_name, new_name);
+  for (auto& input : inputs_) {
+    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
+  }
+  for (auto& output : outputs_) {
+    std::replace(output.second.begin(), output.second.end(), old_name,
+                 new_name);
+  }
 }

 }  // namespace framework
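So under the new scheme Input(name) asserts that exactly one variable is bound to the argument, while Inputs(name) hands back the whole list by reference instead of slicing positional storage into a fresh vector. A standalone sketch of those lookup semantics (not Paddle code):

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  std::unordered_map<std::string, std::vector<std::string>> inputs{
      {"X", {"x"}}, {"Xs", {"x0", "x1"}}};

  // Input("X"): a single-variable argument resolves to that one name.
  assert(inputs.at("X").size() == 1 && inputs.at("X")[0] == "x");

  // Inputs("Xs"): a multi-variable argument comes back whole, by reference.
  const std::vector<std::string>& xs = inputs.at("Xs");
  assert(xs.size() == 2);
}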
paddle/framework/operator.h @ d97a2b42

@@ -21,8 +21,7 @@ limitations under the License. */
 #include <vector>

 #include "paddle/framework/attribute.h"
-#include "paddle/framework/op_desc.pb.h"
-#include "paddle/framework/op_proto.pb.h"
+#include "paddle/framework/framework.pb.h"
 #include "paddle/framework/scope.h"
 #include "paddle/framework/tensor.h"
 #include "paddle/platform/device_context.h"

@@ -95,13 +94,12 @@ class OperatorBase {
   const std::string& Input(const std::string& name) const;

   //! Get a input which has multiple variables.
-  //! TODO add a vector_view to prevent memory copy.
-  std::vector<std::string> Inputs(const std::string& name) const;
+  const std::vector<std::string>& Inputs(const std::string& name) const;

   //! Get a output with argument's name described in `op_proto`
   const std::string& Output(const std::string& name) const;

   //! Get an output which has multiple variables.
-  //! TODO add a vector_view to prevent memory copy.
-  std::vector<std::string> Outputs(const std::string& name) const;
+  const std::vector<std::string>& Outputs(const std::string& name) const;

  public:
   std::string type_;

@@ -109,13 +107,12 @@ class OperatorBase {
   // I (Inputs)
   // O (Outputs)
   // OG (Output Gradients)
-  std::vector<std::string> inputs_;
+  std::unordered_map<std::string, std::vector<std::string>> inputs_;

   // NOTE: in case of OpGrad, outputs_ contains
   // IG (Inputs Gradients)
-  std::vector<std::string> outputs_;
+  std::unordered_map<std::string, std::vector<std::string>> outputs_;
+
   AttributeMap attrs_;
   // store the arguments' offset described in op_desc.
   std::shared_ptr<std::unordered_map<std::string, int>> in_out_idxs_;
 };

@@ -123,16 +120,12 @@ class OperatorContext {
   OperatorContext(const OperatorBase* op, const Scope& scope)
       : op_(*op), scope_(scope) {}

-  size_t InputSize() const { return op_.inputs_.size(); }
-
-  size_t OutputSize() const { return op_.outputs_.size(); }
-
-  const Variable* InputVar(const size_t index) const {
-    return scope_.FindVar(op_.inputs_.at(index));
+  size_t InputSize(const std::string& name) const {
+    return op_.inputs_.at(name).size();
   }

-  Variable* OutputVar(const size_t index) const {
-    return scope_.FindVar(op_.outputs_.at(index));
+  size_t OutputSize(const std::string& name) const {
+    return op_.outputs_.at(name).size();
   }

   const Variable* InputVar(const std::string& name) const {

@@ -164,24 +157,6 @@ class OperatorContext {
     return res;
   }

-  template <typename T>
-  const T* Input(const size_t index) const {
-    auto var = InputVar(index);
-    PADDLE_ENFORCE(var != nullptr, "Input(%d) should not be nullptr", index);
-    return &var->Get<T>();
-  }
-
-  template <typename T>
-  T* Output(const size_t index) const {
-    auto var = OutputVar(index);
-    PADDLE_ENFORCE(var != nullptr,
-                   "Output(%d) not be nullptr, which means variable [%s] does not "
-                   "exist in scope",
-                   index, op_.outputs_[index]);
-    return var->GetMutable<T>();
-  }
-
   template <typename T>
   const T* Input(const std::string& name) const {
     auto var = InputVar(name);
paddle/framework/operator_test.cc @ d97a2b42

@@ -27,12 +27,12 @@ class OpWithoutKernelTest : public OperatorBase {
   void InferShape(const Scope& scope) const override {}
   void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
-    op_run_num++;
-    ASSERT_EQ((int)inputs_.size(), 1);
-    ASSERT_EQ((int)outputs_.size(), 1);
-    ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
+    ++op_run_num;
+    ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
+    ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
+    ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
     ASSERT_EQ(x, 1);
-    ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
+    ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
   }

  public:

@@ -60,8 +60,13 @@ REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest,
 TEST(OperatorBase, all) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("test_operator");
-  *op_desc.mutable_inputs()->Add() = "IN1";
-  *op_desc.mutable_outputs()->Add() = "OUT1";
+  auto* ipt = op_desc.mutable_inputs()->Add();
+  *ipt->mutable_var_names()->Add() = "IN1";
+  ipt->set_op_proto_name("input");
+
+  auto* output = op_desc.mutable_outputs()->Add();
+  *output->mutable_var_names()->Add() = "OUT1";
+  output->set_op_proto_name("output");
+
   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
   attr->set_type(paddle::framework::AttrType::FLOAT);

@@ -113,24 +118,6 @@ class CPUKernelTest : public OpKernel {
   }
 };

-// multiple inputs test
-class OperatorMultiInputsTest : public OperatorBase {
- public:
-  void Init() override { x = 1; }
-  void InferShape(const Scope& scope) const override {}
-  void Run(const Scope& scope,
-           const platform::DeviceContext& dev_ctx) const override {
-    ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
-    ASSERT_EQ(x, 1);
-    ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
-    ASSERT_EQ(Input("x"), "IN1");
-    ASSERT_EQ(Input("y"), "OUT1");
-  }
-
- public:
-  float x = 0;
-};
-
 class OpKernelTestMultiInputsProtoAndCheckerMaker
     : public OpProtoAndCheckerMaker {
  public:

@@ -196,8 +183,14 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
 TEST(OpKernel, all) {
   paddle::framework::OpDesc op_desc;
   op_desc.set_type("op_with_kernel");
-  *op_desc.mutable_inputs()->Add() = "IN1";
-  *op_desc.mutable_outputs()->Add() = "OUT1";
+  auto* ipt = op_desc.mutable_inputs()->Add();
+  *ipt->mutable_var_names()->Add() = "IN1";
+  ipt->set_op_proto_name("input");
+
+  auto* output = op_desc.mutable_outputs()->Add();
+  *output->mutable_var_names()->Add() = "OUT1";
+  output->set_op_proto_name("output");
+
   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
   attr->set_type(paddle::framework::AttrType::FLOAT);

@@ -223,12 +216,19 @@ TEST(OpKernel, multi_inputs) {
   OpDesc op_desc;
   op_desc.set_type("op_multi_inputs_with_kernel");
-  *op_desc.mutable_inputs()->Add() = "x0";
-  *op_desc.mutable_inputs()->Add() = "x1";
-  *op_desc.mutable_inputs()->Add() = "x2";
-  *op_desc.mutable_inputs()->Add() = "k0";
-  *op_desc.mutable_outputs()->Add() = "y0";
-  *op_desc.mutable_outputs()->Add() = "y1";
+  auto x = op_desc.mutable_inputs()->Add();
+  x->set_op_proto_name("xs");
+  *x->mutable_var_names()->Add() = "x0";
+  *x->mutable_var_names()->Add() = "x1";
+  *x->mutable_var_names()->Add() = "x2";
+  auto k = op_desc.mutable_inputs()->Add();
+  k->set_op_proto_name("k");
+  *k->mutable_var_names()->Add() = "k0";
+  auto y = op_desc.mutable_outputs()->Add();
+  y->set_op_proto_name("ys");
+  *y->mutable_var_names()->Add() = "y0";
+  *y->mutable_var_names()->Add() = "y1";
+
   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
   attr->set_type(paddle::framework::AttrType::FLOAT);
paddle/framework/pybind.cc @ d97a2b42

@@ -53,9 +53,10 @@ void ExposeOperator(ClassType &m) {
              return op.type_;
            })
       .def("outputs",
-           [](const typename ClassType::type &op) -> std::vector<std::string> {
-             return op.outputs_;
-           })
+           [](const typename ClassType::type &op)
+               -> std::unordered_map<std::string, std::vector<std::string>> {
+                 return op.outputs_;
+               })
       .def("__str__", &ClassType::type::DebugString);
 }
paddle/operators/add_op.cc @ d97a2b42

@@ -20,15 +20,10 @@ namespace operators {
 class AddOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
-    PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
-                   "Inputs of AddOp must all be set");
-    PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
-                   "Outputs of AddOp must all be set");
-    PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
-                   "Two input of Add Op's dimension must be same.");
-    ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
+    PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
+                      ctx.Input<Tensor>("Y")->dims(),
+                      "Two input of Add Op's dimension must be same.");
+    ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
paddle/operators/add_op.h @ d97a2b42

@@ -22,9 +22,9 @@ template <typename Place, typename T>
 class AddKernel : public OpKernel {
  public:
   void Compute(const ExecutionContext& context) const override {
-    auto input0 = context.Input<Tensor>(0);
-    auto input1 = context.Input<Tensor>(1);
-    auto output = context.Output<Tensor>(0);
+    auto* input0 = context.Input<Tensor>("X");
+    auto* input1 = context.Input<Tensor>("Y");
+    auto* output = context.Output<Tensor>("Out");

     output->mutable_data<T>(context.GetPlace());
paddle/operators/cross_entropy_op.cc @ d97a2b42

@@ -20,19 +20,13 @@ namespace operators {
 class OnehotCrossEntropyOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2,
-                   "Input size of OnehotCrossEntropyOp must be two");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1,
-                   "Output size of OnehotCrossEntropyOp must be one");
-    PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.InputVar(1) != nullptr,
-                   "Inputs of OnehotCrossEntropyOp must all be set");
-    PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
-                   "Outputs of OnehotCrossEntropyOp must all be set");
-    PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims().size() == 2,
-                   "X's dimension must be 2.");
-    PADDLE_ENFORCE(ctx.Output<Tensor>(0)->dims().size() == 1,
-                   "label's dimension must be 1.");
-    ctx.Output<Tensor>(0)->Resize({ctx.Input<Tensor>(0)->dims()[0]});
+    auto *X = ctx.Input<Tensor>("X");
+    auto *label = ctx.Input<Tensor>("label");
+
+    PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
+    PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
+    PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
+    ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
   }
 };
paddle/operators/cross_entropy_op.h @ d97a2b42

@@ -43,7 +43,7 @@ class OnehotCrossEntropyOpKernel : public OpKernel {
   void Compute(const ExecutionContext& ctx) const override {
     auto X = ctx.Input<Tensor>("X");
     const T* Xdata = X->data<T>();
-    const int* label_data = ctx.Input<Tensor>(1)->data<int>();
+    const int* label_data = ctx.Input<Tensor>("label")->data<int>();
     auto Y = ctx.Output<Tensor>("Y");

     Y->mutable_data<T>(ctx.GetPlace());
paddle/operators/fc_op.cc
View file @ d97a2b42
...
@@ -22,19 +22,19 @@ class FullyConnectedOp : public NetOp {
   void Init() override {
-    AddOp(OpRegistry::CreateOp("mul",
-                               {
-                                   Input("X"), Input("W"),
-                               },
-                               {Output("before_act")}, {}));
+    AddOp(OpRegistry::CreateOp("mul",
+                               {
+                                   {"X", {Input("X")}}, {"Y", {Input("W")}},
+                               },
+                               {{"Out", {Output("before_act")}}}, {}));
     auto b = Input("b");
     if (b != framework::kEmptyVarName) {
-      AddOp(OpRegistry::CreateOp("rowwise_add",
-                                 {Output("before_act"), Input("b")},
-                                 {Output("before_act")}, {}));
+      AddOp(OpRegistry::CreateOp(
+          "rowwise_add", {{"X", {Output("before_act")}}, {"b", {Input("b")}}},
+          {{"Out", {Output("before_act")}}}, {}));
     }
     auto activation = GetAttr<std::string>("activation");
-    AddOp(OpRegistry::CreateOp(activation, {Output("before_act")},
-                               {Output("Y")}, {}));
+    AddOp(OpRegistry::CreateOp(activation, {{"X", {Output("before_act")}}},
+                               {{"Out", {Output("Out")}}}, {}));
     CompleteAddOp(false);
   }
 };
...
@@ -47,7 +47,7 @@ class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
     AddInput("W", "the weight of fc operator");
     AddInput("b", "the bias of fc operator");
-    AddOutput("Y", "the output of fc operator");
+    AddOutput("Out", "the output of fc operator");
     AddOutput("before_act", "the before activation output of fc operator")
         .SetTemporary();
     AddAttr<std::string>("activation", "The activation key for fc layer")
...
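The refactor changes CreateOp's argument lists from flat variable-name vectors to maps from formal parameter names to variable-name lists. A toy sketch of how the fc composition above reads under that shape of call; CreateOp here is a hypothetical stand-in for OpRegistry::CreateOp, and "sigmoid" stands for whatever value the "activation" attribute carries:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy alias matching the shape of the new CreateOp arguments:
// each formal name ("X", "W", "Out") maps to a list of variable names.
using VarNameMap = std::map<std::string, std::vector<std::string>>;

// Hypothetical stand-in used only to show how the fc composition
// wires variables together.
void CreateOp(const std::string& type, const VarNameMap& inputs,
              const VarNameMap& outputs) {
  std::cout << type << ":";
  for (auto& kv : inputs)
    for (auto& v : kv.second) std::cout << " " << kv.first << "=" << v;
  std::cout << " ->";
  for (auto& kv : outputs)
    for (auto& v : kv.second) std::cout << " " << kv.first << "=" << v;
  std::cout << "\n";
}

int main() {
  // Mirrors the three AddOp calls in FullyConnectedOp::Init.
  CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}}, {{"Out", {"before_act"}}});
  CreateOp("rowwise_add", {{"X", {"before_act"}}, {"b", {"b"}}},
           {{"Out", {"before_act"}}});
  CreateOp("sigmoid", {{"X", {"before_act"}}}, {{"Out", {"out"}}});
  return 0;
}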
paddle/operators/fill_zeros_like_op.cc
View file @ d97a2b42
...
@@ -20,16 +20,8 @@ namespace operators {
 class FillZerosLikeOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 1UL,
-                   "Input size of FillZerosLikeOp must be one.");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
-                   "Output size of AddOp must be one.");
-    PADDLE_ENFORCE(ctx.InputVar(0) != nullptr,
-                   "Input of FillZerosLikeOp must be set.");
-    PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr,
-                   "Output of FillZerosLikeOp must be set.");
-    ctx.Output<framework::Tensor>(0)->Resize(
-        ctx.Input<framework::Tensor>(0)->dims());
+    ctx.Output<framework::Tensor>("Dst")->Resize(
+        ctx.Input<framework::Tensor>("Src")->dims());
   }
 };
...
paddle/operators/mean_op.cc
View file @ d97a2b42
...
@@ -20,11 +20,9 @@ namespace operators {
 class MeanOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 1, "Input size of AddOp must be one");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of AddOp must be one");
-    PADDLE_ENFORCE(ctx.InputVar(0) != nullptr && ctx.OutputVar(0) != nullptr,
-                   "Input/Output of MeanOp must be initialized.");
-    ctx.Output<Tensor>(0)->Resize(framework::make_ddim({1}));
+    PADDLE_ENFORCE(ctx.InputVar("X") != nullptr,
+                   "Input of MeanOp must be initialized.");
+    ctx.Output<Tensor>("Out")->Resize({1});
   }
 };
...
paddle/operators/mul_op.cc
View file @ d97a2b42
...
@@ -20,9 +20,8 @@ namespace operators {
 class MulOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
-    auto dim0 = ctx.Input<Tensor>(0)->dims();
-    auto dim1 = ctx.Input<Tensor>(1)->dims();
+    auto dim0 = ctx.Input<Tensor>("X")->dims();
+    auto dim1 = ctx.Input<Tensor>("Y")->dims();
     PADDLE_ENFORCE_EQ(dim0.size(), 2,
                       "input X(%s) should be a tensor with 2 dims, a matrix",
                       ctx.op_.Input("X"));
...
@@ -32,8 +31,7 @@ class MulOp : public OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         dim0[1], dim1[0],
         "First matrix's width must be equal with second matrix's height.");
-    PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "The mul op takes only one output");
-    ctx.Output<Tensor>(0)->Resize({dim0[0], dim1[1]});
+    ctx.Output<Tensor>("Out")->Resize({dim0[0], dim1[1]});
   }
 };
...
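MulOp's shape inference reduces to checking the two matrix dimensions and emitting [M, N] from inputs of shape [M, K] and [K, N]. A standalone sketch with plain std::vector dims standing in for Paddle's DDim:

#include <cassert>
#include <vector>

// Minimal sketch of MulOp's shape inference: for X of shape [M, K] and
// Y of shape [K, N], the output is [M, N].
std::vector<int> InferMulShape(const std::vector<int>& dim0,
                               const std::vector<int>& dim1) {
  assert(dim0.size() == 2);    // input X should be a tensor with 2 dims
  assert(dim1.size() == 2);    // input Y should be a tensor with 2 dims
  assert(dim0[1] == dim1[0]);  // X's width must equal Y's height
  return {dim0[0], dim1[1]};
}

int main() {
  auto out = InferMulShape({3, 4}, {4, 5});
  assert(out[0] == 3 && out[1] == 5);
  return 0;
}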
paddle/operators/net_op.cc
View file @ d97a2b42
...
@@ -15,6 +15,7 @@
 */
 #include "paddle/operators/net_op.h"
+#include <set>
 #include "paddle/framework/op_registry.h"
 namespace paddle {
...
@@ -23,36 +24,39 @@ namespace operators {
 void NetOp::CompleteAddOp(bool calc) {
   add_op_done_ = true;
   if (!calc) return;
-  std::unordered_set<std::string> input_set;
-  std::unordered_set<std::string> output_set;
-  std::unordered_set<std::string> temp_output;
+  std::set<std::string> input_set;
+  std::set<std::string> output_set;
+  std::set<std::string> temp_output;
   for (auto& op : ops_) {
     for (auto& ipt : op->inputs_) {
-      if (!Contains(output_set, ipt)) {  // Not other op's output
-        input_set.insert(ipt);
-      } else {
-        temp_output.insert(ipt);
+      for (auto& var_name : ipt.second) {
+        if (!Contains(output_set, var_name)) {  // Not other op's output
+          input_set.insert(var_name);
+        } else {
+          temp_output.insert(var_name);
+        }
       }
     }
     for (auto& opt : op->outputs_) {
-      output_set.insert(opt);
+      for (auto& var_name : opt.second) {
+        output_set.insert(var_name);
+      }
     }
   }
-  inputs_.reserve(input_set.size());
-  std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs_));
-  std::sort(inputs_.begin(), inputs_.end());
-  outputs_.reserve(output_set.size());
-  std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs_));
-  std::sort(outputs_.begin(), outputs_.end());
+  auto& inputs = inputs_["all"];
+  inputs.reserve(input_set.size());
+  std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs));
+  auto& outputs = outputs_["all"];
+  outputs.reserve(output_set.size());
+  std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs));
   //! TODO figure out how to generate temporary_index in Network.
   std::vector<int> tmp_index;
   tmp_index.reserve(temp_output.size());
-  int output_len = static_cast<int>(outputs_.size());
+  int output_len = static_cast<int>(outputs.size());
   for (int i = 0; i < output_len; ++i) {
-    if (Contains(temp_output, outputs_[i])) {
+    if (Contains(temp_output, outputs[i])) {
       tmp_index.push_back(i);
     }
   }
...
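The loop above classifies every variable name: it is an external input of the net unless some earlier op produced it, in which case it is a temporary, and every produced name is an output. A self-contained sketch of that bookkeeping with toy op structs (not Paddle's OperatorBase):

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

using VarNameMap = std::map<std::string, std::vector<std::string>>;

struct ToyOp {
  VarNameMap inputs, outputs;
};

// Sketch of the CompleteAddOp bookkeeping: walk the ops in order,
// classify each input as external (no earlier op produced it) or
// temporary, and collect every output.
void CollectIO(const std::vector<ToyOp>& ops, std::set<std::string>* in,
               std::set<std::string>* out, std::set<std::string>* tmp) {
  for (auto& op : ops) {
    for (auto& ipt : op.inputs)
      for (auto& var : ipt.second)
        (out->count(var) ? *tmp : *in).insert(var);
    for (auto& opt : op.outputs)
      for (auto& var : opt.second) out->insert(var);
  }
}

int main() {
  // Same topology as the net_op_test case: the second op consumes "y".
  std::vector<ToyOp> ops = {
      {{{"X", {"x"}}, {"W", {"w1"}}}, {{"Out", {"y"}}}},
      {{{"X", {"y"}}, {"W", {"w2"}}}, {{"Out", {"z"}}}}};
  std::set<std::string> in, out, tmp;
  CollectIO(ops, &in, &out, &tmp);
  assert(in == (std::set<std::string>{"x", "w1", "w2"}));
  assert(out == (std::set<std::string>{"y", "z"}));
  assert(tmp == (std::set<std::string>{"y"}));
  return 0;
}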
paddle/operators/net_op.h
View file @ d97a2b42
...
@@ -14,8 +14,7 @@ limitations under the License. */
 #pragma once
-#include "paddle/framework/op_desc.pb.h"
-#include "paddle/framework/op_proto.pb.h"
+#include "paddle/framework/framework.pb.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
...
paddle/operators/net_op_test.cc
View file @ d97a2b42
...
@@ -47,23 +47,24 @@ TEST(OpKernel, all) {
   ASSERT_NE(net, nullptr);
   auto op1 = std::make_shared<TestOp>();
-  op1->inputs_ = {"x", "w1", "b1"};
-  op1->outputs_ = {"y"};
+  op1->inputs_ = {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}};
+  op1->outputs_ = {{"Out", {"y"}}};
   net->AddOp(op1);
   auto op2 = std::make_shared<TestOp>();
-  op2->inputs_ = {"y", "w2", "b2"};
-  op2->outputs_ = {"z"};
+  op2->inputs_ = {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}};
+  op2->outputs_ = {{"Out", {"z"}}};
   net->AddOp(op2);
   net->CompleteAddOp();
-  AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net->inputs_);
-  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_);
+  AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
+                               net->inputs_.at("__all__"));
+  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at("__all__"));
   auto tmp_idx_iter = net->attrs_.find("temporary_index");
   ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
   auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
   ASSERT_EQ(1UL, tmp_idx.size());
-  ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);
+  ASSERT_EQ("y", net->outputs_.at("__all__")[tmp_idx[0]]);
   Scope scope;
   platform::CPUDeviceContext dev_ctx;
...
@@ -78,8 +79,8 @@ TEST(OpKernel, all) {
 TEST(NetOp, insert_op) {
   NetOp net;
   auto op1 = std::make_shared<EmptyOp>();
-  op1->inputs_ = {"x", "w1", "b1"};
-  op1->outputs_ = {"y"};
+  op1->inputs_ = {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}};
+  op1->outputs_ = {{"Out", {"y"}}};
   net.AddOp(op1);
   net.InsertOp(0, op1);
   ASSERT_EQ(2UL, net.ops_.size());
...
paddle/operators/recurrent_op.cc
View file @ d97a2b42
...
@@ -89,12 +89,17 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const {
     // create step net's temp inputs
     for (auto& input : net_op->inputs_) {
       // the weight are located in parent scope
-      if (!step_scope.FindVar(input))
-        step_scope.NewVar(input)->GetMutable<Tensor>();
+      for (auto& var_name : input.second) {
+        if (!step_scope.FindVar(var_name)) {
+          step_scope.NewVar(var_name)->GetMutable<Tensor>();
+        }
+      }
     }
     // create stepnet's outputs
     for (const auto& output : net_op->outputs_) {
-      step_scope.NewVar(output);
+      for (auto& var_name : output.second) {
+        step_scope.NewVar(var_name);
+      }
     }
     step_scopes->emplace_back(&step_scope);
   }
...
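The scope-creation loops now walk a map from formal names to variable-name lists instead of a flat name vector. A toy sketch of the create-if-missing walk; ToyScope is a hypothetical stand-in for framework::Scope:

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

// Toy scope: just a set of variable names, enough to show the
// create-if-missing walk over a name map.
struct ToyScope {
  std::set<std::string> vars;
  bool FindVar(const std::string& n) const { return vars.count(n) > 0; }
  void NewVar(const std::string& n) { vars.insert(n); }
};

int main() {
  std::map<std::string, std::vector<std::string>> inputs = {
      {"X", {"x0", "x1"}}, {"W", {"w"}}};
  ToyScope step_scope;
  step_scope.NewVar("w");  // the weights already live in the parent scope
  // Mirrors the new loop in CreateScopes: every variable name behind
  // every formal input gets a slot unless it already exists.
  for (auto& input : inputs)
    for (auto& var_name : input.second)
      if (!step_scope.FindVar(var_name)) step_scope.NewVar(var_name);
  assert(step_scope.vars.size() == 3);
  return 0;
}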
paddle/operators/recurrent_op_test.cc
View file @ d97a2b42
This diff is collapsed. Click to expand.
paddle/operators/rowwise_add_op.cc
View file @ d97a2b42
...
@@ -19,16 +19,14 @@ namespace operators {
 class RowWiseAddOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2UL,
-                   "Two inputs is needed by rowwise add");
-    auto dim0 = ctx.Input<Tensor>(0)->dims();
-    auto dim1 = ctx.Input<Tensor>(1)->dims();
+    auto dim0 = ctx.Input<Tensor>("X")->dims();
+    auto dim1 = ctx.Input<Tensor>("b")->dims();
     PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix");
     PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector");
     PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "The output size must be 1");
-    ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
+    PADDLE_ENFORCE(ctx.OutputSize("Out") == 1, "The output size must be 1");
+    ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
paddle/operators/rowwise_add_op.h
View file @ d97a2b42
...
@@ -25,8 +25,8 @@ class RowWiseAddKernel : public OpKernel {
     auto out = context.Output<Tensor>(0);
     out->mutable_data<T>(context.GetPlace());
-    auto input = EigenMatrix<T>::From(*context.Input<Tensor>(0));
-    auto bias = EigenVector<T>::From(*context.Input<Tensor>(1));
+    auto input = EigenMatrix<T>::From(*context.Input<Tensor>("X"));
+    auto bias = EigenVector<T>::From(*context.Input<Tensor>("b"));
     auto output = EigenMatrix<T>::From(*out);
     const int bias_size = bias.dimension(0);
...
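The Eigen expression in this kernel broadcasts the length-W bias over every row of the N x W input, which is why InferShape insists the widths match. A plain-loop sketch of the same computation, without Eigen:

#include <cassert>
#include <vector>

// Plain-loop equivalent of the Eigen broadcast in RowWiseAddKernel:
// add a length-w bias vector to every row of an n x w matrix.
void RowWiseAdd(std::vector<float>* x, const std::vector<float>& b,
                int n, int w) {
  assert(static_cast<int>(x->size()) == n * w);
  assert(static_cast<int>(b.size()) == w);  // the widths must be the same
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < w; ++j) (*x)[i * w + j] += b[j];
}

int main() {
  std::vector<float> x = {1, 2, 3, 4};
  RowWiseAdd(&x, {10, 20}, 2, 2);
  assert(x[0] == 11 && x[3] == 24);
  return 0;
}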
paddle/operators/sgd_op.cc
View file @ d97a2b42
...
@@ -20,14 +20,10 @@ namespace operators {
 class SGDOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 2, "Input size of SGDOp must be two");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Output size of SGDOp must be one");
-    PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, "inputs[0] mast be set");
-    PADDLE_ENFORCE(ctx.InputVar(1) != nullptr, "inputs[1] mast be set");
-    PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, "outputs[0] mast be set");
-    PADDLE_ENFORCE(ctx.Input<Tensor>(0)->dims() == ctx.Input<Tensor>(1)->dims(),
-                   "Two input of SGD Op's dimension must be same.");
-    ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
+    PADDLE_ENFORCE(
+        ctx.Input<Tensor>("param")->dims() == ctx.Input<Tensor>("grad")->dims(),
+        "Two input of SGD Op's dimension must be same.");
+    ctx.Output<Tensor>("param_out")->Resize(ctx.Input<Tensor>("param")->dims());
   }
 };
...
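InferShape here only guarantees that param and grad share one shape and that param_out inherits it. Assuming the usual update rule param_out = param - learning_rate * grad (the rule itself is not shown in this hunk), a standalone sketch of the kernel's arithmetic:

#include <cassert>
#include <vector>

// Sketch of the update SGDOp is assumed to implement:
// param_out[i] = param[i] - lr * grad[i].
std::vector<float> SgdUpdate(const std::vector<float>& param,
                             const std::vector<float>& grad, float lr) {
  assert(param.size() == grad.size());  // dimensions must be the same
  std::vector<float> out(param.size());
  for (size_t i = 0; i < param.size(); ++i) out[i] = param[i] - lr * grad[i];
  return out;
}

int main() {
  // Exact float arithmetic: 1 - 0.25 * 4 == 0 and 2 - 0.25 * 8 == 0.
  auto out = SgdUpdate({1.0f, 2.0f}, {4.0f, 8.0f}, 0.25f);
  assert(out[0] == 0.0f && out[1] == 0.0f);
  return 0;
}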
paddle/operators/sigmoid_op.cc
View file @ d97a2b42
...
@@ -19,9 +19,7 @@ namespace operators {
 class SigmoidOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output");
-    ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
+    ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
paddle/operators/softmax_op.cc
View file @ d97a2b42
...
@@ -20,12 +20,8 @@ namespace operators {
 class SoftmaxOp : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 1UL,
-                   "Only one input is need for softmax");
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
                    "The input of softmax op must be matrix");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
-                   "Only one output is need for softmax");
     ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
@@ -43,10 +39,6 @@ class SoftmaxOpMaker : public OpProtoAndCheckerMaker {
 class SoftmaxOpGrad : public OperatorWithKernel {
  protected:
   void InferShape(const InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(ctx.InputSize() == 3UL,
-                   "Input of SoftmaxOpGrad should be 3, X, Y, YG");
-    PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
-                   "Output of SoftmaxOpGrad should be 1");
     PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
     PADDLE_ENFORCE(ctx.InputVar(framework::GradVarName("Y")) != nullptr,
                    "Input(Y@GRAD) should not be null");
...
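The gradient input is addressed through framework::GradVarName, which is why the message reads "Input(Y@GRAD) should not be null". A sketch of that naming convention, assuming the helper simply appends the @GRAD suffix:

#include <cassert>
#include <string>

// Sketch of the convention behind framework::GradVarName: the gradient
// of a variable is addressed by suffixing "@GRAD" to its name.
std::string GradVarName(const std::string& name) { return name + "@GRAD"; }

int main() {
  assert(GradVarName("Y") == "Y@GRAD");
  return 0;
}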
paddle/platform/enforce.h
View file @ d97a2b42
...
@@ -195,12 +195,28 @@ struct CompatibleType {
   typedef typename std::conditional<t1_to_t2, T2, T1>::type type;
 };
+template <typename T>
+inline std::string enforce_to_string(const T& val) {
+  std::ostringstream sout;
+  sout << val;
+  return sout.str();
+}
+template <>
+inline std::string enforce_to_string(const std::string& val) {
+  return val;
+}
+template <>
+inline std::string enforce_to_string(const char* const& val) {
+  return std::string(val);
+}
 #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...)        \
   PADDLE_ENFORCE(__COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL0)                    \
                      __CMP __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL1),         \
                  "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \
-                 #__VAL0, #__VAL1, std::to_string(__VAL0),                    \
-                 std::to_string(__VAL1),                                      \
+                 #__VAL0, #__VAL1,                                            \
+                 paddle::platform::enforce_to_string(__VAL0),                 \
+                 paddle::platform::enforce_to_string(__VAL1),                 \
                  paddle::string::Sprintf("" __VA_ARGS__));
 #define __COMPATIBLE_TYPE(__VAL0, __VAL1, __VAL) \
...
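std::to_string only accepts arithmetic types, so the old macro could not format string-valued comparands in PADDLE_ENFORCE_EQ and friends. The new enforce_to_string streams any value that has an operator<< and passes strings through unchanged. A standalone copy of the helper with a few checks:

#include <cassert>
#include <sstream>
#include <string>

// Standalone copy of the helper added above: format any streamable
// value, and pass strings through untouched.
template <typename T>
inline std::string enforce_to_string(const T& val) {
  std::ostringstream sout;
  sout << val;
  return sout.str();
}
template <>
inline std::string enforce_to_string(const std::string& val) {
  return val;
}
template <>
inline std::string enforce_to_string(const char* const& val) {
  return std::string(val);
}

int main() {
  assert(enforce_to_string(42) == "42");                   // arithmetic
  assert(enforce_to_string(std::string("abc")) == "abc");  // std::string
  const char* s = "xy";
  assert(enforce_to_string(s) == "xy");                    // C string
  return 0;
}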