BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit af2eb949
Authored on Sep 13, 2017 by Liu Yiqun

Support inputs and weights of multi-dimensions and refine the output names.

Parent: 8495f3f0
Showing 2 changed files with 64 additions and 38 deletions (+64, -38):

paddle/operators/fc_op.cc (+35, -18)
python/paddle/v2/framework/tests/test_fc_op.py (+29, -20)
paddle/operators/fc_op.cc
@@ -26,7 +26,7 @@ class FCOp : public NetOp {
       : NetOp(type, inputs, outputs, attrs) {
     auto x = Inputs("X");
     auto w = Inputs("W");
-    auto mul_out = Outputs("mul_out");
+    auto mul_out = Outputs("MulOut");
     PADDLE_ENFORCE_EQ(
         x.size(), w.size(),
         "The size of inputs X(%d) should be the same as that of weights W(%d).",
@@ -36,36 +36,51 @@ class FCOp : public NetOp {
                       "as that of inputs X(%d).",
                       mul_out.size(), x.size());
 
-    int n = x.size();
-    PADDLE_ENFORCE_GE(n, 1,
+    size_t n = x.size();
+    PADDLE_ENFORCE_GE(n, static_cast<size_t>(1),
                       "The size of inputs X(%d) should be no less than 1.", n);
 
+    auto x_num_col_dims = Attr<std::vector<int>>("xNumColDims");
+    auto w_num_col_dims = Attr<std::vector<int>>("wNumColDims");
+    PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n,
+                      "The size of attribute xNumColDims(%d) should be the "
+                      "same as that of inputs X(%d).",
+                      x_num_col_dims.size(), n);
+    PADDLE_ENFORCE_EQ(w_num_col_dims.size(), n,
+                      "The size of attribute wNumColDims(%d) should be the "
+                      "same as that of inputs X(%d).",
+                      w_num_col_dims.size(), n);
+
     // mul_out[i] = X[i] * W[i]
-    for (int i = 0; i < n; i++) {
-      AppendOp(framework::OpRegistry::CreateOp(
-          "mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, {{"Out", {mul_out[i]}}}, {}));
+    for (size_t i = 0; i < n; i++) {
+      framework::AttributeMap mul_attr;
+      mul_attr["x_num_col_dims"] = static_cast<int>(x_num_col_dims[i]);
+      mul_attr["y_num_col_dims"] = static_cast<int>(w_num_col_dims[i]);
+      AppendOp(
+          framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}},
+                                          {{"Out", {mul_out[i]}}}, mul_attr));
     }
 
     // sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1]
     if (n > 1) {
       AppendOp(framework::OpRegistry::CreateOp(
-          "sum", {{"X", {mul_out}}}, {{"Out", {Output("sum_out")}}}, {}));
+          "sum", {{"X", {mul_out}}}, {{"Out", {Output("SumOut")}}}, {}));
     } else {
       AppendOp(framework::OpRegistry::CreateOp(
-          "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("sum_out")}}}, {}));
+          "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("SumOut")}}}, {}));
     }
 
     // add_out = sum_out + b
-    auto b = Input("b");
-    std::string add_out = "sum_out";
+    auto b = Input("B");
+    std::string add_out = "SumOut";
     if (b != framework::kEmptyVarName) {
-      add_out = "add_out";
+      add_out = "AddOut";
       AppendOp(framework::OpRegistry::CreateOp(
-          "rowwise_add", {{"X", {Output("sum_out")}}, {"b", {Input("b")}}},
+          "rowwise_add", {{"X", {Output("SumOut")}}, {"b", {Input("B")}}},
           {{"Out", {Output(add_out)}}}, {}));
     } else {
-      if (Output("add_out") != framework::kEmptyVarName) {
-        this->Rename(Output("add_out"), framework::kEmptyVarName);
+      if (Output("AddOut") != framework::kEmptyVarName) {
+        this->Rename(Output("AddOut"), framework::kEmptyVarName);
       }
     }
@@ -84,24 +99,26 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsDuplicable();
     AddInput("W", "The weights of FC operator, a ordered vector of 2-D matrix.")
         .AsDuplicable();
-    AddInput("b", "The 1-D bias vector of FC operator");
+    AddInput("B", "The 1-D bias vector of FC operator");
 
     AddOutput("Y", "The activated output matrix of FC operator");
-    AddOutput("mul_out",
+    AddOutput("MulOut",
               "The intermediate outputs of FC operator, "
               "saving the product of X[i] * W[i]")
         .AsIntermediate()
         .AsDuplicable();
-    AddOutput("sum_out",
+    AddOutput("SumOut",
               "The intermediate output of FC operator, "
               "saving the sum of products, sum(X[i] * W[i])")
         .AsIntermediate();
-    AddOutput("add_out",
+    AddOutput("AddOut",
               "The non-actived output of FC operator, saving X * W + b")
         .AsIntermediate();
     AddAttr<std::string>("activation", "The activation type of FC operator.")
         .SetDefault("identity")
         .InEnum({"identity", "sigmoid", "softmax"});
+    AddAttr<std::vector<int>>("xNumColDims", "");
+    AddAttr<std::vector<int>>("wNumColDims", "");
     AddComment(R"DOC(
 Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer
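The functional core of the change above is the loop that now appends one `mul` op per (X[i], W[i]) pair and forwards per-input `x_num_col_dims` / `y_num_col_dims` attributes taken from the new `xNumColDims` / `wNumColDims` vectors. Below is a minimal NumPy sketch of the flattening rule those attributes appear to imply, inferred from the shapes used in TestFCOp2 further down rather than from the mul op's own documentation: the leading `num_col_dims` axes collapse into the matrix rows, the remaining axes into the columns. `flatten_to_2d` is a hypothetical helper for illustration, not a Paddle API.

import numpy as np


def flatten_to_2d(t, num_col_dims):
    # Assumed rule: leading `num_col_dims` axes become the row dimension,
    # all remaining axes become the column dimension.
    rows = int(np.prod(t.shape[:num_col_dims]))
    return t.reshape(rows, -1)


# Shapes taken from TestFCOp2 below.
x0 = np.zeros((16, 4, 8), dtype="float32")   # paired with xNumColDims[0] == 1
w1 = np.zeros((4, 8, 10), dtype="float32")   # paired with wNumColDims[1] == 2

assert flatten_to_2d(x0, 1).shape == (16, 32)   # same as x0.reshape(16, 4 * 8)
assert flatten_to_2d(w1, 2).shape == (32, 10)   # same as w1.reshape(4 * 8, 10)

With both operands flattened this way, each MulOut[i] is an ordinary 2-D matmul, so all products share one shape and the subsequent sum, rowwise_add, and activation steps are unchanged.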
python/paddle/v2/framework/tests/test_fc_op.py
@@ -5,52 +5,61 @@ from op_test import OpTest
 class TestFCOp1(OpTest):
     def setUp(self):
-        self.op_type = "fc"
         x0 = np.random.random((16, 32)).astype("float32")
         w0 = np.random.random((32, 10)).astype("float32")
         b = np.random.random(10).astype("float32")
-        self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)], "b": b}
+
         mul_out0 = np.dot(x0, w0)
         sum_out = mul_out0
         add_out = sum_out + b
         identity_out = add_out
+
+        self.op_type = "fc"
+        self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)], "B": b}
         self.outputs = {
-            "mul_out": [("mul_out0", mul_out0)],
-            "sum_out": sum_out,
-            "add_out": add_out,
+            "MulOut": [("MulOut0", mul_out0)],
+            "SumOut": sum_out,
+            "AddOut": add_out,
             "Y": identity_out
         }
+        self.attrs = {"xNumColDims": [1], "wNumColDims": [1]}
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(["X0", "W0", "b"], "Y", max_relative_error=0.01)
+        self.check_grad(["X0", "W0", "B"], "Y", max_relative_error=0.01)
 
 
 class TestFCOp2(OpTest):
     def setUp(self):
-        self.op_type = "fc"
-        x0 = np.random.random((16, 32)).astype("float32")
+        x0 = np.random.random((16, 4, 8)).astype("float32")
         x1 = np.random.random((16, 32)).astype("float32")
         w0 = np.random.random((32, 10)).astype("float32")
-        w1 = np.random.random((32, 10)).astype("float32")
+        w1 = np.random.random((4, 8, 10)).astype("float32")
         b = np.random.random(10).astype("float32")
+
+        mul_out0 = np.dot(x0.reshape(16, 4 * 8), w0)
+        mul_out1 = np.dot(x1, w1.reshape(4 * 8, 10))
+        sum_out = mul_out0 + mul_out1
+        add_out = np.add(sum_out, b)
+        sigmoid_out = 1 / (1 + np.exp(-add_out))
+
+        self.op_type = "fc"
         self.inputs = {
             "X": [("X0", x0), ("X1", x1)],
             "W": [("W0", w0), ("W1", w1)],
-            "b": b
+            "B": b
         }
-        self.attrs = {"activation": "sigmoid"}
+        self.attrs = {
+            "xNumColDims": [1, 1],
+            "wNumColDims": [1, 2],
+            "activation": "sigmoid"
+        }
 
-        mul_out0 = np.dot(x0, w0)
-        mul_out1 = np.dot(x1, w1)
-        sum_out = mul_out0 + mul_out1
-        add_out = np.add(sum_out, b)
-        sigmoid_out = 1 / (1 + np.exp(-add_out))
         self.outputs = {
-            "mul_out": [("mul_out0", mul_out0), ("mul_out1", mul_out1)],
-            "sum_out": sum_out,
-            "add_out": add_out,
+            "MulOut": [("MulOut0", mul_out0), ("MulOut1", mul_out1)],
+            "SumOut": sum_out,
+            "AddOut": add_out,
             "Y": sigmoid_out
         }
@@ -59,7 +68,7 @@ class TestFCOp2(OpTest):
 
     def test_check_grad(self):
         self.check_grad(
-            ["X0", "X1", "W0", "W1", "b"], "Y", max_relative_error=0.01)
+            ["X0", "X1", "W0", "W1", "B"], "Y", max_relative_error=0.01)
 
 
 if __name__ == '__main__':
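For a concrete cross-check of what TestFCOp2 expects from the composite op, here is a self-contained NumPy sketch (not part of the commit) that mirrors its data: per-input flattening, the two matmuls, their sum, the row-wise bias add, and the sigmoid activation. The flattening rule and the helper name `fc_forward_reference` are assumptions for illustration only, not Paddle APIs.

import numpy as np


def fc_forward_reference(xs, ws, b, x_num_col_dims, w_num_col_dims,
                         activation="identity"):
    # MulOut[i] = flatten(X[i]) @ flatten(W[i]); the leading num_col_dims axes
    # of each operand are collapsed into the matrix rows (assumed rule).
    def flat(t, k):
        return t.reshape(int(np.prod(t.shape[:k])), -1)

    mul_outs = [np.dot(flat(x, xk), flat(w, wk))
                for x, w, xk, wk in zip(xs, ws, x_num_col_dims, w_num_col_dims)]
    sum_out = np.sum(mul_outs, axis=0)       # SumOut
    add_out = sum_out + b                    # AddOut (rowwise_add broadcasts b)
    if activation == "sigmoid":
        return 1 / (1 + np.exp(-add_out))    # Y
    return add_out                           # "identity"


# Same data layout as TestFCOp2.
x0 = np.random.random((16, 4, 8)).astype("float32")
x1 = np.random.random((16, 32)).astype("float32")
w0 = np.random.random((32, 10)).astype("float32")
w1 = np.random.random((4, 8, 10)).astype("float32")
b = np.random.random(10).astype("float32")

y = fc_forward_reference([x0, x1], [w0, w1], b, [1, 1], [1, 2], "sigmoid")
expected = 1 / (1 + np.exp(-(np.dot(x0.reshape(16, 4 * 8), w0) +
                             np.dot(x1, w1.reshape(4 * 8, 10)) + b)))
assert y.shape == (16, 10)
assert np.allclose(y, expected)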