Commit a07cef33

Authored Aug 04, 2017 by Yi Wang

Merge branch 'develop' of https://github.com/paddlepaddle/paddle into cpplint_errors

Parents: 7aac1218, 2fef13e3
Showing 24 changed files, with 286 additions and 291 deletions (+286 −291).
Changed files:

cmake/cpplint.cmake                         +4  −1
cmake/util.cmake                            +0  −1
paddle/framework/CMakeLists.txt             +7  −5
paddle/framework/attribute.cc               +85 −0
paddle/framework/attribute.h                +9  −0
paddle/framework/attribute.proto            +0  −0
paddle/framework/backward.cc                +10 −12
paddle/framework/backward_test.cc           +25 −52
paddle/framework/grad_op_builder.cc         +4  −6
paddle/framework/grad_op_builder_test.cc    +28 −36
paddle/framework/op_desc.proto              +1  −1
paddle/framework/op_proto.proto             +1  −1
paddle/framework/op_registry.cc             +3  −32
paddle/framework/op_registry.h              +4  −47
paddle/framework/operator.h                 +21 −20
paddle/framework/pybind.cc                  +2  −2
paddle/gserver/tests/CMakeLists.txt         +6  −1
paddle/math/MathUtils.cpp                   +1  −1
paddle/math/tests/test_matrixCompare.cpp    +66 −64
paddle/operators/fc_op.cc                   +1  −1
paddle/operators/mean_op.cc                 +1  −1
paddle/operators/mean_op.h                  +2  −2
paddle/operators/softmax_op.cc              +3  −3
paddle/operators/softmax_op.h               +2  −2
cmake/cpplint.cmake

The lint step changes from an unconditional PRE_BUILD hook into an OUTPUT-based custom command with explicit DEPENDS, so cpplint re-runs only when the source file or the cpplint script itself changes.

@@ -56,11 +56,14 @@ macro(add_style_check_target TARGET_NAME)
         # cpplint code style
         get_filename_component(base_filename ${filename} NAME)
         set(CUR_GEN ${CMAKE_CURRENT_BINARY_DIR}/${base_filename}.cpplint)
-        add_custom_command(TARGET ${TARGET_NAME} PRE_BUILD
+        add_custom_command(OUTPUT ${CUR_GEN} PRE_BUILD
                            COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
                                    "--filter=${STYLE_FILTER}"
                                    "--write-success=${CUR_GEN}" ${filename}
+                           DEPENDS ${filename} ${PROJ_ROOT}/paddle/scripts/cpplint.py
                            WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+        add_custom_target(${base_filename}.cpplint DEPENDS ${CUR_GEN})
+        add_dependencies(${TARGET_NAME} ${base_filename}.cpplint)
       endif()
     endforeach()
   endif()
cmake/util.cmake

@@ -118,7 +118,6 @@ endfunction()
 macro(add_unittest_without_exec TARGET_NAME)
     add_executable(${TARGET_NAME} ${ARGN})
     link_paddle_test(${TARGET_NAME})
-    add_style_check_target(${TARGET_NAME} ${ARGN})
 endmacro()

 # add_unittest
paddle/framework/CMakeLists.txt

@@ -12,13 +12,15 @@ cc_test(variable_test SRCS variable_test.cc)
 cc_library(scope SRCS scope.cc)
 cc_test(scope_test SRCS scope_test.cc DEPS scope)

-proto_library(attr_type SRCS attr_type.proto)
-proto_library(op_proto SRCS op_proto.proto DEPS attr_type)
-proto_library(op_desc SRCS op_desc.proto DEPS attr_type)
+proto_library(attribute_proto SRCS attribute.proto)
+proto_library(op_proto SRCS op_proto.proto DEPS attribute_proto)
+proto_library(op_desc SRCS op_desc.proto DEPS attribute_proto)
 cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
 cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf)

-cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope)
+cc_library(attribute SRCS attribute.cc DEPS op_desc op_proto)
+cc_library(operator SRCS operator.cc DEPS op_desc device_context tensor scope attribute)
 cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)

 cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS op_proto operator)
@@ -26,7 +28,7 @@ cc_library(op_registry SRCS op_registry.cc DEPS op_desc grad_op_builder)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
 cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)

-py_proto_compile(framework_py_proto SRCS attr_type.proto op_proto.proto op_desc.proto)
+py_proto_compile(framework_py_proto SRCS attribute.proto op_proto.proto op_desc.proto)
 # Generate an empty __init__.py to make framework_py_proto as a valid python module.
 add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
 add_dependencies(framework_py_proto framework_py_proto_init)
paddle/framework/attribute.cc (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/framework/attribute.h"

#include <vector>

namespace paddle {
namespace framework {

template <>
AttrType AttrTypeID<int>() {
  return INT;
}
template <>
AttrType AttrTypeID<float>() {
  return FLOAT;
}
template <>
AttrType AttrTypeID<std::string>() {
  return STRING;
}
template <>
AttrType AttrTypeID<std::vector<int>>() {
  return INTS;
}
template <>
AttrType AttrTypeID<std::vector<float>>() {
  return FLOATS;
}
template <>
AttrType AttrTypeID<std::vector<std::string>>() {
  return STRINGS;
}

Attribute GetAttrValue(const AttrDesc& attr_desc) {
  switch (attr_desc.type()) {
    case paddle::framework::AttrType::INT: {
      return attr_desc.i();
    }
    case paddle::framework::AttrType::FLOAT: {
      return attr_desc.f();
    }
    case paddle::framework::AttrType::STRING: {
      return attr_desc.s();
    }
    case paddle::framework::AttrType::INTS: {
      std::vector<int> val(attr_desc.ints_size());
      for (int i = 0; i < attr_desc.ints_size(); ++i) {
        val[i] = attr_desc.ints(i);
      }
      return val;
    }
    case paddle::framework::AttrType::FLOATS: {
      std::vector<float> val(attr_desc.floats_size());
      for (int i = 0; i < attr_desc.floats_size(); ++i) {
        val[i] = attr_desc.floats(i);
      }
      return val;
    }
    case paddle::framework::AttrType::STRINGS: {
      std::vector<std::string> val(attr_desc.strings_size());
      for (int i = 0; i < attr_desc.strings_size(); ++i) {
        val[i] = attr_desc.strings(i);
      }
      return val;
    }
  }
  PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
  return boost::blank();
}

}  // namespace framework
}  // namespace paddle
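The Attribute type that GetAttrValue returns is a boost::variant (see the attribute.h diff below). A minimal standalone sketch, independent of the Paddle build, of how such variant values are stored and read back; the main() function here is illustrative only:

    #include <boost/variant.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    // Mirrors the typedef in paddle/framework/attribute.h.
    typedef boost::variant<boost::blank, int, float, std::string,
                           std::vector<int>, std::vector<float>,
                           std::vector<std::string>>
        Attribute;

    int main() {
      Attribute a = 3;                          // what GetAttrValue yields for INT
      Attribute b = std::vector<int>{7, 8, 9};  // ... and for INTS
      std::cout << boost::get<int>(a) << "\n";                      // prints 3
      std::cout << boost::get<std::vector<int>>(b).size() << "\n";  // prints 3
      return 0;
    }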
paddle/framework/attr_checker.h → paddle/framework/attribute.h (renamed)

@@ -6,6 +6,9 @@
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
+
+#include "paddle/framework/attribute.pb.h"
+#include "paddle/framework/op_desc.pb.h"
 #include "paddle/platform/enforce.h"

 namespace paddle {
@@ -14,8 +17,14 @@ namespace framework {
 typedef boost::variant<boost::blank, int, float, std::string, std::vector<int>,
                        std::vector<float>, std::vector<std::string>>
     Attribute;
+
 typedef std::unordered_map<std::string, Attribute> AttributeMap;

+template <typename T>
+AttrType AttrTypeID();
+
+Attribute GetAttrValue(const AttrDesc& attr_desc);
+
 // check whether a value(attribute) fit a certain limit
 template <typename T>
 class LargerThanChecker {
paddle/framework/attr_type.proto → paddle/framework/attribute.proto (renamed)

File moved without changes.
paddle/framework/backward.cc

@@ -59,19 +59,17 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
   //  If all input gradients of forwarding operator do not need to calculate,
   //  just return an NOP. Not return null ptr because NOP does not take
   //  too much time for calculation, but it is useful for simplifying logic.
-  if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(),
-               no_grad_names)) {
+  if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) {
     return NOP();
   }

   //  All output gradients of forwarding operator do not need to calculate.
   //  Then all input gradients cannot be computed at all, and we put them into
   //  `no_grad_names` set. Return an NOP.
-  if (AllInSet(forwardOp.outputs_, OperatorBase::GRAD_VAR_SUFFIX(),
-               no_grad_names)) {
+  if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) {
     for (auto& name : forwardOp.inputs_) {
       // Mark all input is not need
-      no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX());
+      no_grad_names.insert(name + kGradVarSuffix);
     }
     return NOP();
   }
@@ -134,9 +132,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
     for (std::string& grad_input : grad_op->inputs_) {
       if (no_grad_names.count(grad_input)) {
-        std::string prefix = grad_input.substr(
-            0, grad_input.size() - OperatorBase::GRAD_VAR_SUFFIX().size());
-        grad_input = prefix + OperatorBase::ZERO_VAR_SUFFIX();
+        std::string prefix =
+            grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());
+        grad_input = prefix + kZeroVarSuffix;

         // If part of input gradient of that operator is not calculated, fill
         // zero variables to that input gradient.
@@ -147,7 +145,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     for (std::string& grad_output : grad_op->outputs_) {
       if (no_grad_names.count(grad_output)) {
-        grad_output = OperatorBase::EMPTY_VAR_NAME();
+        grad_output = kEmptyVarName;
       }
     }
@@ -168,14 +166,14 @@ std::shared_ptr<OperatorBase> Backward(
   std::unordered_set<std::string> no_grad_names;
   no_grad_names.reserve(no_grad_vars.size());

-  no_grad_names.insert(OperatorBase::EMPTY_VAR_NAME() +
-                       OperatorBase::GRAD_VAR_SUFFIX());
+  no_grad_names.insert(kEmptyVarName + kGradVarSuffix);

   for (auto& name : no_grad_vars) {
-    no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX());
+    no_grad_names.insert(name + kGradVarSuffix);
   }
   size_t uid = 0;
   return BackwardRecursive(forwardOp, no_grad_names, uid);
 }
 }  // namespace framework
 }  // namespace paddle
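A minimal sketch (illustrative, not from the commit) of the string manipulation the hunk above performs when an uncomputable gradient input is rewritten from the "@GRAD" suffix to the "@ZERO" suffix:

    #include <iostream>
    #include <string>

    // Mirror the constants introduced in paddle/framework/operator.h.
    const std::string kGradVarSuffix = "@GRAD";
    const std::string kZeroVarSuffix = "@ZERO";

    int main() {
      std::string grad_input = "x" + kGradVarSuffix;  // "x@GRAD"
      // Strip "@GRAD" and append "@ZERO", as BackwardRecursive does.
      std::string prefix =
          grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());
      grad_input = prefix + kZeroVarSuffix;
      std::cout << grad_input << "\n";  // prints "x@ZERO"
      return 0;
    }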
paddle/framework/backward_test.cc

@@ -78,14 +78,14 @@ class FcOp : public ops::NetOp {
                                {Output("mul_result")}, {}));
     auto b_name = Input("b");
     std::string before_act = "mul_result";
-    if (b_name != EMPTY_VAR_NAME()) {
+    if (b_name != kEmptyVarName) {
       AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_result"), b_name},
                                  {Output("add_result")}, {}));
       before_act = "add_result";
     } else {
       auto out_varname = Output("add_result");
-      if (out_varname != EMPTY_VAR_NAME()) {
-        this->Rename(out_varname, EMPTY_VAR_NAME());
+      if (out_varname != kEmptyVarName) {
+        this->Rename(out_varname, kEmptyVarName);
       }
     }
@@ -163,13 +163,12 @@ TEST(Backward, simple_op_grad) {
   ASSERT_NE(fwd, nullptr);
   auto gop = f::OpRegistry::CreateGradOp(*fwd);
   ASSERT_EQ(4UL, gop->inputs_.size());
-  ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), gop->inputs_[0]);
+  ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
   ASSERT_EQ("rowwise_add_grad", gop->type_);
-  ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]);
-  ASSERT_EQ("b" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[1]);
+  ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
+  ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);

-  ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-            gop->Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()));
+  ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
 }

 TEST(Backward, simple_op_not_need_grad) {
@@ -177,7 +176,7 @@ TEST(Backward, simple_op_not_need_grad) {
   ASSERT_NE(fwd, nullptr);
   auto gop = f::Backward(*fwd, {"X"});
   ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
-                      "X" + f::OperatorBase::GRAD_VAR_SUFFIX()),
+                      "X" + f::kGradVarSuffix),
             gop->outputs_.end());

   auto no_input_gop = f::Backward(*fwd, {"X", "b"});
@@ -210,8 +209,8 @@ TEST(Backward, net_fc_backward_normal) {
 }

 TEST(Backward, net_fc_backward_not_have_b) {
-  std::shared_ptr<f::OperatorBase> fwd = f::OpRegistry::CreateOp(
-      "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()},
-      {"mul_result", "add_result", "tmp"}, {});
+  std::shared_ptr<f::OperatorBase> fwd =
+      f::OpRegistry::CreateOp("fc", {"X", "w", f::kEmptyVarName},
+                              {"mul_result", "add_result", "tmp"}, {});
   ASSERT_NE(fwd, nullptr);
   std::shared_ptr<f::OperatorBase> gop = f::Backward(*fwd, {});
@@ -242,24 +241,21 @@ TEST(Backward, net_input_of_network_not_need_grad) {
   std::unordered_set<std::string> all_output = std::unordered_set<std::string>(
       bwd_net->outputs_.begin(), bwd_net->outputs_.end());
-  all_output.erase(f::OperatorBase::EMPTY_VAR_NAME());
+  all_output.erase(f::kEmptyVarName);

   for (auto& out : {"W1", "b1", "hidden0", "W2", "b2"}) {
-    ASSERT_NE(all_output.find(out + f::OperatorBase::GRAD_VAR_SUFFIX()),
-              all_output.end());
+    ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
   }

   // Not Generated X
-  ASSERT_EQ(all_output.find("X" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            all_output.end());
+  ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());

   ASSERT_EQ(2UL, bwd_net->ops_.size());
   ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
   auto first_fc_grad = static_cast<ops::NetOp*>(bwd_net->ops_[1].get());
   ASSERT_EQ(3UL, first_fc_grad->ops_.size());
-  ASSERT_EQ(
-      f::OperatorBase::EMPTY_VAR_NAME(),
-      first_fc_grad->ops_[2]->Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX()));
+  ASSERT_EQ(f::kEmptyVarName,
+            first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
 }

 TEST(Backward, net_shared_weight) {
@@ -311,17 +307,15 @@ TEST(Backward, op_part_of_output_are_not_need) {
   ASSERT_EQ(1UL, fill_zero.inputs_.size());
   ASSERT_EQ("Z", fill_zero.inputs_[0]);
   ASSERT_EQ(1UL, fill_zero.outputs_.size());
-  ASSERT_EQ("Z" + f::OperatorBase::ZERO_VAR_SUFFIX(), fill_zero.outputs_[0]);
+  ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);

   auto& d_many_out = *net->ops_[1];
   ASSERT_EQ("many_output_op_grad", d_many_out.type_);
   ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size());  // I/O/OG
-  ASSERT_EQ("Z" + f::OperatorBase::ZERO_VAR_SUFFIX(),
-            d_many_out.Input("z" + f::OperatorBase::GRAD_VAR_SUFFIX()));
-  ASSERT_EQ("Y" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-            d_many_out.Input("y" + f::OperatorBase::GRAD_VAR_SUFFIX()));
-  ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-            d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX()));
+  ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" + f::kGradVarSuffix));
+  ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" + f::kGradVarSuffix));
+  ASSERT_EQ("X" + f::kGradVarSuffix,
+            d_many_out.Output("x" + f::kGradVarSuffix));
 }

 TEST(Backward, op_part_of_input_are_not_need) {
@@ -331,12 +325,10 @@ TEST(Backward, op_part_of_input_are_not_need) {
   ASSERT_EQ(grad_mul.type_, "mul_grad");
   ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
   ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
-  ASSERT_EQ(grad_mul.Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            f::OperatorBase::EMPTY_VAR_NAME());
-  ASSERT_EQ(grad_mul.Output("B" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "b" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  ASSERT_EQ(grad_mul.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "out" + f::OperatorBase::GRAD_VAR_SUFFIX());
+  ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
+  ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" + f::kGradVarSuffix);
+  ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
+            "out" + f::kGradVarSuffix);
   ASSERT_EQ(grad_mul.Input("A"), "a");
   ASSERT_EQ(grad_mul.Input("B"), "b");
   ASSERT_EQ(grad_mul.Input("Out"), "out");
@@ -368,23 +360,4 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) {
   EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL);
   EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL);
   EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL);
-
-  /*
-  EXPECT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            f::OperatorBase::EMPTY_VAR_NAME());
-  EXPECT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "w3" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "b3" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(grad_fc.Output("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "mul_out3" + f::OperatorBase::GRAD_VAR_SUFFIX());
-
-  EXPECT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "out3" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(grad_fc.Input("X"), "out2");
-  EXPECT_EQ(grad_fc.Input("W"), "w3");
-  EXPECT_EQ(grad_fc.Input("mul_result"), "mul_out3");
-  EXPECT_EQ(grad_fc.Input("add_result"), "tmp_out3");
-  EXPECT_EQ(grad_fc.Input("Out"), "out3");
-  */
 }
paddle/framework/grad_op_builder.cc

@@ -56,8 +56,7 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
   for (const auto& arg : src_arg_list) {
     std::string src_name = arg.name();
-    std::string dst_name =
-        is_grad ? src_name + OperatorBase::GRAD_VAR_SUFFIX() : src_name;
+    std::string dst_name = is_grad ? src_name + kGradVarSuffix : src_name;
     (*dst_op->in_out_idxs_)[dst_name] = idx++;
     int src_arg_idx = src_op->in_out_idxs_->at(src_name);
     int src_begin =
@@ -65,10 +64,9 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
     int src_end = src_format == nullptr ? src_arg_idx + 1
                                         : src_format->at(src_arg_idx + 1);
     for (int i = src_begin; i < src_end; ++i) {
-      std::string s = is_grad ? src_inout[i] + OperatorBase::GRAD_VAR_SUFFIX()
-                              : arg.ignore_gradient()
-                                    ? OperatorBase::EMPTY_VAR_NAME()
-                                    : src_inout[i];
+      std::string s =
+          is_grad ? src_inout[i] + kGradVarSuffix
+                  : (arg.ignore_gradient() ? kEmptyVarName : src_inout[i]);
       dst_inout.emplace_back(s);
     }
     if (dst_format != nullptr) {
paddle/framework/grad_op_builder_test.cc

@@ -83,24 +83,21 @@ TEST(GradOpBuilder, MutiInOut) {
   EXPECT_EQ(grad_test_op->Input("Out1"), "out1");
   EXPECT_EQ(grad_test_op->Inputs("Out2_mult"),
             std::vector<std::string>({"out2_1", "out2_2"}));
-  EXPECT_EQ(grad_test_op->Input("Out1" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "out1" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(
-      grad_test_op->Inputs("Out2_mult" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-      std::vector<std::string>(
-          {"out2_1" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-           "out2_2" + f::OperatorBase::GRAD_VAR_SUFFIX()}));
+  EXPECT_EQ(grad_test_op->Input("Out1" + f::kGradVarSuffix),
+            "out1" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Inputs("Out2_mult" + f::kGradVarSuffix),
+            std::vector<std::string>(
+                {"out2_1" + f::kGradVarSuffix, "out2_2" + f::kGradVarSuffix}));

   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "in1" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(
-      grad_test_op->Outputs("In2_mult" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-      std::vector<std::string>({"in2_1" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-                                "in2_2" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-                                "in2_3" + f::OperatorBase::GRAD_VAR_SUFFIX()}));
-  EXPECT_EQ(grad_test_op->Output("In3" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "in3" + f::OperatorBase::GRAD_VAR_SUFFIX());
+  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
+            "in1" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
+            std::vector<std::string>({"in2_1" + f::kGradVarSuffix,
+                                      "in2_2" + f::kGradVarSuffix,
+                                      "in2_3" + f::kGradVarSuffix}));
+  EXPECT_EQ(grad_test_op->Output("In3" + f::kGradVarSuffix),
+            "in3" + f::kGradVarSuffix);
 }

 TEST(GradOpBuilder, IOIgnoredInGradient) {
@@ -116,30 +113,25 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   ASSERT_EQ(grad_test_op->inputs_.size(), 5UL + 3UL + 3UL);
   EXPECT_EQ(grad_test_op->Input("In1"), "in1");
   EXPECT_EQ(grad_test_op->Inputs("In2_mult"),
-            std::vector<std::string>({f::OperatorBase::EMPTY_VAR_NAME(),
-                                      f::OperatorBase::EMPTY_VAR_NAME()}));
+            std::vector<std::string>({f::kEmptyVarName, f::kEmptyVarName}));
   EXPECT_EQ(grad_test_op->Inputs("In3_mult"),
             std::vector<std::string>({"in3_1", "in3_2"}));
   EXPECT_EQ(grad_test_op->Inputs("Out1_mult"),
             std::vector<std::string>({"out1_1", "out1_2"}));
-  EXPECT_EQ(grad_test_op->Input("Out2"), f::OperatorBase::EMPTY_VAR_NAME());
-  EXPECT_EQ(
-      grad_test_op->Inputs("Out1_mult" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-      std::vector<std::string>(
-          {"out1_1" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-           "out1_2" + f::OperatorBase::GRAD_VAR_SUFFIX()}));
-  EXPECT_EQ(grad_test_op->Input("Out2" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "out2" + f::OperatorBase::GRAD_VAR_SUFFIX());
+  EXPECT_EQ(grad_test_op->Input("Out2"), f::kEmptyVarName);
+  EXPECT_EQ(grad_test_op->Inputs("Out1_mult" + f::kGradVarSuffix),
+            std::vector<std::string>(
+                {"out1_1" + f::kGradVarSuffix, "out1_2" + f::kGradVarSuffix}));
+  EXPECT_EQ(grad_test_op->Input("Out2" + f::kGradVarSuffix),
+            "out2" + f::kGradVarSuffix);

   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-            "in1" + f::OperatorBase::GRAD_VAR_SUFFIX());
-  EXPECT_EQ(
-      grad_test_op->Outputs("In2_mult" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-      std::vector<std::string>({"in2_1" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-                                "in2_2" + f::OperatorBase::GRAD_VAR_SUFFIX()}));
-  EXPECT_EQ(
-      grad_test_op->Outputs("In3_mult" + f::OperatorBase::GRAD_VAR_SUFFIX()),
-      std::vector<std::string>({"in3_1" + f::OperatorBase::GRAD_VAR_SUFFIX(),
-                                "in3_2" + f::OperatorBase::GRAD_VAR_SUFFIX()}));
+  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
+            "in1" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
+            std::vector<std::string>(
+                {"in2_1" + f::kGradVarSuffix, "in2_2" + f::kGradVarSuffix}));
+  EXPECT_EQ(grad_test_op->Outputs("In3_mult" + f::kGradVarSuffix),
+            std::vector<std::string>(
+                {"in3_1" + f::kGradVarSuffix, "in3_2" + f::kGradVarSuffix}));
 }
paddle/framework/op_desc.proto

@@ -15,7 +15,7 @@ limitations under the License. */
 syntax = "proto2";
 package paddle.framework;

-import "attr_type.proto";
+import "attribute.proto";

 // AttrDesc is used to describe Attributes of an Operator. It contain's
 // name, type, and value of Attribute.
paddle/framework/op_proto.proto

@@ -21,7 +21,7 @@ limitations under the License. */
 syntax = "proto2";
 package paddle.framework;

-import "attr_type.proto";
+import "attribute.proto";

 // Attribute protocol message for 3rd-party language binding.
 // It will store the Op support what attribute and what type.
paddle/framework/op_registry.cc

@@ -14,37 +14,8 @@ limitations under the License. */
 #include <paddle/framework/op_registry.h>

+#include <vector>
+
 namespace paddle {
-namespace framework {
-
-template <>
-void AttrTypeHelper::SetAttrType<int>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::INT);
-}
-
-template <>
-void AttrTypeHelper::SetAttrType<float>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::FLOAT);
-}
-
-template <>
-void AttrTypeHelper::SetAttrType<std::string>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::STRING);
-}
-
-template <>
-void AttrTypeHelper::SetAttrType<std::vector<int>>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::INTS);
-}
-
-template <>
-void AttrTypeHelper::SetAttrType<std::vector<float>>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::FLOATS);
-}
-
-template <>
-void AttrTypeHelper::SetAttrType<std::vector<std::string>>(AttrProto* attr) {
-  attr->set_type(paddle::framework::AttrType::STRINGS);
-}
-}  // namespace framework
+namespace framework {}  // namespace framework
 }  // namespace paddle
paddle/framework/op_registry.h

The AttrTypeHelper struct disappears: its GetAttrValue moves to attribute.cc as a free function, and SetAttrType is replaced by the AttrTypeID<T>() specializations defined there.

@@ -19,7 +19,7 @@ limitations under the License. */
 #include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
-#include "paddle/framework/attr_checker.h"
+#include "paddle/framework/attribute.h"
 #include "paddle/framework/grad_op_builder.h"
 #include "paddle/framework/op_desc.pb.h"
 #include "paddle/framework/scope.h"
@@ -27,49 +27,6 @@ limitations under the License. */
 namespace paddle {
 namespace framework {

-// helper class to set attribute type
-struct AttrTypeHelper {
-  template <typename T>
-  static void SetAttrType(AttrProto* attr);
-
-  static Attribute GetAttrValue(const AttrDesc& attr_desc) {
-    switch (attr_desc.type()) {
-      case paddle::framework::AttrType::INT: {
-        return attr_desc.i();
-      }
-      case paddle::framework::AttrType::FLOAT: {
-        return attr_desc.f();
-      }
-      case paddle::framework::AttrType::STRING: {
-        return attr_desc.s();
-      }
-      case paddle::framework::AttrType::INTS: {
-        std::vector<int> val(attr_desc.ints_size());
-        for (int i = 0; i < attr_desc.ints_size(); ++i) {
-          val[i] = attr_desc.ints(i);
-        }
-        return val;
-      }
-      case paddle::framework::AttrType::FLOATS: {
-        std::vector<float> val(attr_desc.floats_size());
-        for (int i = 0; i < attr_desc.floats_size(); ++i) {
-          val[i] = attr_desc.floats(i);
-        }
-        return val;
-      }
-      case paddle::framework::AttrType::STRINGS: {
-        std::vector<std::string> val(attr_desc.strings_size());
-        for (int i = 0; i < attr_desc.strings_size(); ++i) {
-          val[i] = attr_desc.strings(i);
-        }
-        return val;
-      }
-    }
-    PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
-    return boost::blank();
-  }
-};
-
 // this class not only make proto but also init attribute checkers.
 class OpProtoAndCheckerMaker {
 public:
@@ -136,7 +93,7 @@ class OpProtoAndCheckerMaker {
     *attr->mutable_name() = name;
     *attr->mutable_comment() = comment;
     attr->set_generated(generated);
-    AttrTypeHelper::SetAttrType<T>(attr);
+    attr->set_type(AttrTypeID<T>());
     return op_checker_->AddAttrChecker<T>(name);
   }
@@ -297,7 +254,7 @@ class OpRegistry {
     AttributeMap attrs;
     for (auto& attr : op_desc.attrs()) {
-      attrs[attr.name()] = AttrTypeHelper::GetAttrValue(attr);
+      attrs[attr.name()] = GetAttrValue(attr);
     }
     return CreateOp(op_desc.type(), inputs, outputs, attrs);
@@ -341,7 +298,7 @@ class OpRegistry {
   static void GenerateTempVariableName(OperatorBase* op) {
     static std::atomic<size_t> gUniqId(0UL);
     for (auto& outname : op->outputs_) {
-      if (outname == OperatorBase::TMP_VAR_NAME()) {
+      if (outname == kTempVarName) {
        outname += op->type_;
        outname += "@";
        outname += std::to_string(gUniqId.fetch_add(1));
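The attr->set_type(AttrTypeID<T>()) call above relies on the explicit specializations defined in attribute.cc. A simplified, self-contained sketch of that dispatch pattern; the plain enum here stands in for the protobuf-generated AttrType:

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for the protobuf-generated paddle::framework::AttrType enum.
    enum AttrType { INT, FLOAT, STRING, INTS, FLOATS, STRINGS };

    // Primary template is declared but never defined: using an unsupported
    // attribute type fails at link time.
    template <typename T>
    AttrType AttrTypeID();

    template <> AttrType AttrTypeID<int>() { return INT; }
    template <> AttrType AttrTypeID<float>() { return FLOAT; }
    template <> AttrType AttrTypeID<std::string>() { return STRING; }
    template <> AttrType AttrTypeID<std::vector<int>>() { return INTS; }

    int main() {
      std::cout << AttrTypeID<int>() << "\n";               // 0 (INT)
      std::cout << AttrTypeID<std::vector<int>>() << "\n";  // 3 (INTS)
      return 0;
    }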
paddle/framework/operator.h

The special variable-name helpers move from static member functions on OperatorBase to namespace-level constants (kEmptyVarName, kTempVarName, kGradVarSuffix, kZeroVarSuffix) plus a free GradVarName() function.

@@ -20,7 +20,7 @@ limitations under the License. */
 #include <unordered_map>
 #include <vector>
-#include "paddle/framework/attr_checker.h"
+#include "paddle/framework/attribute.h"
 #include "paddle/framework/op_desc.pb.h"
 #include "paddle/framework/op_proto.pb.h"
 #include "paddle/framework/scope.h"
@@ -32,9 +32,29 @@ limitations under the License. */
 namespace paddle {
 namespace framework {

+/// If a variable is a empty variable, that name will be used.
+const std::string kEmptyVarName = "@EMPTY@";
+
+/// If a variable is a temporary variable, that name will be set in Python,
+/// but it will be convert to a unique name in scope after OpCreator.
+const std::string kTempVarName = "@TEMP@";
+
+/// If a variable's name has a certain suffix, it means that the
+/// variable is the gradient of another varibale.
+/// e.g. Variable "x@GRAD" is the gradient of varibale "x".
+const std::string kGradVarSuffix = "@GRAD";
+
+/// Variables with this suffix are supposed to be filled up with zeros.
+const std::string kZeroVarSuffix = "@ZERO";
+
+inline std::string GradVarName(const std::string& var_name) {
+  return var_name + kGradVarSuffix;
+}
+
 class OperatorBase;
 class InferShapeContext;
 class ExecutionContext;
+
 /**
  * OperatorBase has the basic element that Net will call to do computation.
  * Only CreateOperator from OpRegistry will new Operator directly. User
@@ -43,25 +63,6 @@ class ExecutionContext;
  */
 class OperatorBase {
 public:
-  /// If a variable is a empty variable, that name will be used.
-  static std::string EMPTY_VAR_NAME() { return "@EMPTY@"; }
-
-  /// If a variable is a temporary variable, that name will be set in Python,
-  /// but it will be convert to a unique name in scope after OpCreator.
-  static std::string TMP_VAR_NAME() { return "@TEMP@"; }
-
-  /// If a variable's name has a certain suffix, it means that the
-  /// variable is the gradient of another varibale.
-  /// e.g. Variable "x@GRAD" is the gradient of varibale "x".
-  static std::string GRAD_VAR_SUFFIX() { return "@GRAD"; }
-
-  static std::string GRAD_VAR_NAME(const std::string& name) {
-    return name + GRAD_VAR_SUFFIX();
-  }
-
-  /// Variables with this suffix are supposed to be filled up with zeros.
-  static std::string ZERO_VAR_SUFFIX() { return "@ZERO"; }
-
   virtual ~OperatorBase() {}

   template <typename T>
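A tiny usage sketch (illustrative only) of the new namespace-level constant and the GradVarName() helper:

    #include <cassert>
    #include <string>

    // Copied from the hunk above.
    const std::string kGradVarSuffix = "@GRAD";

    inline std::string GradVarName(const std::string& var_name) {
      return var_name + kGradVarSuffix;
    }

    int main() {
      // Callers now write framework::GradVarName("Y") instead of
      // OperatorBase::GRAD_VAR_NAME("Y").
      assert(GradVarName("Y") == "Y@GRAD");
      return 0;
    }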
paddle/framework/pybind.cc

@@ -163,8 +163,8 @@ All parameter, weight, gradient are variables in Paddle.
   m.def_submodule(
        "var_names",
        "The module will return special predefined variable name in Paddle")
-      .def("empty", OperatorBase::EMPTY_VAR_NAME)
-      .def("temp", OperatorBase::TMP_VAR_NAME);
+      .def("empty", []() { return kEmptyVarName; })
+      .def("temp", []() { return kTempVarName; });
   // clang-format off
   py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
       .def_static("create",
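The old static member functions could be handed to .def() directly as callables; a plain std::string constant cannot, so a zero-argument lambda adapts each one. A minimal sketch, assuming pybind11 and a hypothetical module name:

    #include <pybind11/pybind11.h>
    #include <string>

    const std::string kEmptyVarName = "@EMPTY@";  // as in operator.h
    const std::string kTempVarName = "@TEMP@";

    // "var_names_demo" is a hypothetical module name for this sketch.
    PYBIND11_MODULE(var_names_demo, m) {
      // Wrap each constant in a lambda so .def() receives a callable.
      m.def("empty", []() { return kEmptyVarName; });
      m.def("temp", []() { return kTempVarName; });
    }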
paddle/gserver/tests/CMakeLists.txt

 # gserver pacakge unittests

+file(GLOB_RECURSE GSERVER_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h")
+file(GLOB_RECURSE GSERVER_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.cpp")
+add_style_check_target(paddle_gserver ${GSERVER_SOURCES})
+add_style_check_target(paddle_gserver ${GSERVER_HEADER})
+
 ################### test_ProtoDataProvider ############
 add_unittest_without_exec(test_ProtoDataProvider
     test_ProtoDataProvider.cpp)
paddle/math/MathUtils.cpp

@@ -25,7 +25,7 @@ namespace paddle {
 */
 void sparseRand(
     int* major, int* minor, int nnz, int majorLen, int minorMax, bool useGpu) {
-  CHECK(size_t(nnz) > size_t(1));
+  CHECK(size_t(nnz) >= size_t(1));
   int* cpuMajor;
   int* cpuMinor;
   CpuIVector cpuMinorVec(nnz);
paddle/math/tests/test_matrixCompare.cpp

@@ -79,8 +79,8 @@ void testMatrixMaxSequence(int batchSize, int inputDim) {
 }

 TEST(Matrix, maxSequence) {
-  for (auto batchSize : {1, 10, 128, 1000, 6000}) {
-    for (auto inputDim : {1, 32, 100, 512}) {
+  for (auto batchSize : {1, 3, 997}) {   // prime numbers close to 1, 4, 1024
+    for (auto inputDim : {1, 7, 131}) {  // prime numbers close to 1, 8, 128
       VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim;
       testMatrixMaxSequence(batchSize, inputDim);
     }
@@ -240,14 +240,10 @@ TEST(Matrix, unary) {
     // inverse matrix
     testMatrixInverse(height);
 #else
-    LOG(WARNING) << "Cannot run Matrix Inverse Unit Test.\n"
-                 << "Failed to find lapack library in current system.\n"
-                 << "To address this issue, Please adopt one of the following "
-                    "approaches: \n"
-                 << "1. Simply issue `sudo apt-get install liblapacke-dev` to "
-                    "avoid re-build source code. \n"
-                 << "2. Install MKL/Openblas/ATLAS and re-build PaddlePaddle "
-                    "source code.";
+    LOG(WARNING) << "This version of PaddlePaddle was not built with LAPACK"
+                 << "support so we cannot test matrix inverse. To test "
+                 << "matrix inverse, please install LAPACKE "
+                    "and MKL/Openblas/ATLAS, and re-build PaddlePaddle.";
 #endif
   }
 }
@@ -341,8 +337,8 @@ void testMatrixSoftmaxBp(int height, int width) {
 }

 TEST(Matrix, softmax) {
-  for (auto height : {1, 11, 73, 128, 200}) {
-    for (auto width : {1, 32, 100, 512, 1000}) {
+  for (auto height : {1, 3, 131}) {   // prime numbers close to 1, 4, 127
+    for (auto width : {1, 17, 251}) {  // prime numbers close to 1, 16, 256
       VLOG(3) << " height=" << height << " width=" << width;

       testMatrixSoftmax(height, width);
@@ -527,7 +523,7 @@ void testVectorRowFunc(int size) {
 }

 TEST(Vector, rowFunc) {
-  for (auto size : {1, 5, 31, 90, 150, 500, 1000, 4000}) {
+  for (auto size : {1, 3, 997}) {  // prime numbers close to 1, 4, 1024
     VLOG(3) << " size=" << size;
     testVectorRowFunc(size);
   }
@@ -604,7 +600,7 @@ void testVectorIsEqual(int size) {
 }

 TEST(Vector, Equal) {
-  for (auto size : {1, 5, 31, 90, 150, 500, 1000, 4000}) {
+  for (auto size : {1, 3, 997}) {  // prime numbers close to 1, 4, 1024
     VLOG(3) << " size=" << size;
     testVectorReset<int>(size);
     testVectorReset<real>(size);
@@ -635,9 +631,8 @@ void testMatrixTopK(int samples, int dim, int beamSize) {
 }

 TEST(Matrix, topK) {
-  for (auto samples : {1, 5, 31, 90, 150, 500}) {
-    for (auto dim :
-         {1, 5, 8, 10, 15, 64, 80, 120, 256, 300, 1280, 5120, 50000}) {
+  for (auto samples : {1, 17, 131}) {  // prime numbers close to 1, 16, 127
+    for (auto dim : {1, 3, 997}) {     // prime numbers close to 1, 4, 1024
       for (auto beamSize : {1, 5, 10, 20, 40, (int)rand() % dim + 1}) {
         if (beamSize > dim) continue;
         VLOG(3) << " samples=" << samples << " beamSize=" << beamSize
@@ -650,6 +645,7 @@ TEST(Matrix, topK) {
 void testSMatrixTopK(int samples, int dim, int beamSize, real ratio) {
   int nnz = samples * dim * ratio;
+  if (nnz < 1) nnz = 1;  // Because sparseRand in MathUtil.cpp requires this.
   MatrixPtr cpuSrc = std::make_shared<CpuSparseMatrix>(samples, dim, nnz);
   MatrixPtr gpuSrc = std::make_shared<GpuSparseMatrix>(samples, dim, nnz);
   MatrixPtr cpuVal = std::make_shared<CpuMatrix>(samples, beamSize);
@@ -683,9 +679,9 @@ void testSMatrixTopK(int samples, int dim, int beamSize, real ratio) {
 }

 TEST(SMatrix, topK) {
-  for (auto samples : {1, 5, 100}) {
-    for (auto dim : {10000, 10000, 50000}) {
-      for (auto beamSize : {1, 5, 40, 100, 500}) {
+  for (auto samples : {1, 3, 61}) {
+    for (auto dim : {1, 3, 61}) {
+      for (auto beamSize : {1, 3, 61}) {
         for (auto ratio : {0.01, 0.001}) {
           if (beamSize > dim) continue;
           VLOG(3) << " samples=" << samples << " beamSize=" << beamSize
@@ -806,10 +802,9 @@ void testClassificationError(int numSamples, int dim, int topkSize) {
 }

 TEST(Matrix, classificationError) {
-  for (auto numSamples : {1, 5, 31, 90, 150, 300}) {
-    for (auto dim :
-         {1, 5, 8, 10, 15, 64, 80, 120, 256, 300, 1280, 5120, 50000}) {
-      for (auto topkSize : {1, 5, 10, 20, 40, (int)rand() % dim + 1}) {
+  for (auto numSamples : {1, 3, 31}) {
+    for (auto dim : {1, 3, 31}) {
+      for (auto topkSize : {1, 3, (int)rand() % dim + 1}) {
         if (topkSize > dim) continue;
         VLOG(3) << " sample= " << numSamples << " topkSize= " << topkSize
                 << " dim= " << dim;
@@ -1016,13 +1011,15 @@ void testAvgPoolFwdBwd(int numSamples,
   TensorCheckErr(*inputGrad, *inputGpuGrad);
 }

+// TODO(yi): I noticed many such blindly combinatorial tests in this
+// file. They are no help to locate defects at all.
 TEST(Matrix, PoolFwdBwd) {
-  for (auto numSamples : {5, 32}) {
-    for (auto channels : {1, 9, 32}) {
-      for (auto imgSizeH : {14, 28}) {
-        for (auto imgSizeW : {16, 30}) {
-          for (auto sizeX : {2, 5}) {
-            for (auto sizeY : {2, 5}) {
+  for (auto numSamples : {1, 3}) {
+    for (auto channels : {1, 3}) {
+      for (auto imgSizeH : {13, 17}) {
+        for (auto imgSizeW : {17, 19}) {
+          for (auto sizeX : {2, 3}) {
+            for (auto sizeY : {2, 3}) {
               for (auto sH : {1, 2}) {
                 for (auto sW : {1, 2}) {
                   for (auto pH : {0, (sizeY - 1) / 2}) {
@@ -1128,8 +1125,8 @@ TEST(Matrix, MaxOutFwdBwd) {
 }

 TEST(CpuMatrix, copyFrom) {
-  const size_t height = 1000;
-  const size_t width = 1000;
+  const size_t height = 31;
+  const size_t width = 53;
   CpuMatrix cpu(height, width);
   GpuMatrix gpu(height, width);
   CpuMatrix copy(height, width);
@@ -1149,6 +1146,10 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
   IVectorPtr cpuSequence;
   generateSequenceStartPositions(batchSize, cpuSequence);
+  for (int i = 0; i < cpuSequence->getSize(); ++i) {
+    (cpuSequence->getData())[i] += 1;  // so no way that maxSeqLen is 0;
+  }
+
   IVectorPtr gpuSequence = IVector::create(cpuSequence->getSize(), true);
   gpuSequence->copyFrom(*cpuSequence);
@@ -1156,45 +1157,46 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
   size_t maxSeqLen = *std::max_element(cpuSequence->getData(),
                                        cpuSequence->getData() + numSeq);
+  printf("numSeq = %ld, maxSeqLen = %ld\n", numSeq, maxSeqLen);
   MatrixPtr cBatch = std::make_shared<CpuMatrix>(numSeq * maxSeqLen, inputDim);
   MatrixPtr gBatch = std::make_shared<GpuMatrix>(numSeq * maxSeqLen, inputDim);
   MatrixPtr cCheck = std::make_shared<CpuMatrix>(numSeq * maxSeqLen, inputDim);

-  hl_sequence2batch_copy_padding(gBatch->getData(),
-                                 gpuInput->getData(),
-                                 cpuSequence->getData(),
-                                 inputDim,
-                                 maxSeqLen,
-                                 numSeq,
-                                 false,
-                                 true);
-
-  cCheck->copyFrom(*gBatch);
-
-  int* seqStart = cpuSequence->getData();
-  float* batchData = cBatch->getData();
-  float* seqData = cpuInput->getData();
-  for (size_t i = 0; i < maxSeqLen; i++) {
-    for (size_t j = 0; j < numSeq; j++) {
-      size_t sequenceStart = seqStart[j];
-      size_t sequenceLength = seqStart[j + 1] - seqStart[j];
-      if (i < sequenceLength) {
-        memcpy(batchData + (i * numSeq + j) * inputDim,
-               seqData + (sequenceStart + i) * inputDim,
-               inputDim * sizeof(real));
-      } else {
-        memset(batchData + (i * numSeq + j) * inputDim,
-               0,
-               inputDim * sizeof(real));
-      }
-    }
-  }
-
-  TensorCheckErr(*cBatch, *cCheck);
+  // hl_sequence2batch_copy_padding(gBatch->getData(),
+  //                                gpuInput->getData(),
+  //                                cpuSequence->getData(),
+  //                                inputDim,
+  //                                maxSeqLen,
+  //                                numSeq,
+  //                                false,
+  //                                true);
+  // cCheck->copyFrom(*gBatch);
+  // int* seqStart = cpuSequence->getData();
+  // float* batchData = cBatch->getData();
+  // float* seqData = cpuInput->getData();
+  // for (size_t i = 0; i < maxSeqLen; i++) {
+  //   for (size_t j = 0; j < numSeq; j++) {
+  //     size_t sequenceStart = seqStart[j];
+  //     size_t sequenceLength = seqStart[j + 1] - seqStart[j];
+  //     if (i < sequenceLength) {
+  //       memcpy(batchData + (i * numSeq + j) * inputDim,
+  //              seqData + (sequenceStart + i) * inputDim,
+  //              inputDim * sizeof(real));
+  //     } else {
+  //       memset(batchData + (i * numSeq + j) * inputDim,
+  //              0,
+  //              inputDim * sizeof(real));
+  //     }
+  //   }
+  // }
+  // TensorCheckErr(*cBatch, *cCheck);
 }

 TEST(Matrix, warpCTC) {
-  for (auto batchSize : {51, 526, 2884}) {
-    for (auto inputDim : {32, 512, 2026}) {
+  for (auto batchSize : {1, 3, 17}) {
+    for (auto inputDim : {1, 3, 31}) {
       VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim;
       testBatch2seqPadding(batchSize, inputDim);
     }
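The in-code comments suggest why the shrunken grids use primes: a prime extent is never a multiple of a kernel's vector width or block size, so remainder and edge paths get exercised (this rationale is an inference from the comments, not stated elsewhere in the commit). An illustrative sketch of the pattern:

    #include <cstdio>

    int main() {
      // Same shape as the reduced grids above: a few prime sizes instead of
      // many round ones.
      for (int height : {1, 3, 131}) {    // primes near 1, 4, 127
        for (int width : {1, 17, 251}) {  // primes near 1, 16, 256
          std::printf("testing height=%d width=%d\n", height, width);
        }
      }
      return 0;
    }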
paddle/operators/fc_op.cc

@@ -27,7 +27,7 @@ public:
                                {Output("before_act")},
                                {}));
     auto b = Input("b");
-    if (b != EMPTY_VAR_NAME()) {
+    if (b != framework::kEmptyVarName) {
       AddOp(OpRegistry::CreateOp("rowwise_add",
                                  {Output("before_act"), Input("b")},
                                  {Output("before_act")},
paddle/operators/mean_op.cc

@@ -41,7 +41,7 @@ public:
 class MeanGradOp : public OperatorWithKernel {
 protected:
   void InferShape(const InferShapeContext &ctx) const override {
-    ctx.Output<Tensor>("X" + GRAD_VAR_SUFFIX())
+    ctx.Output<Tensor>("X" + framework::kGradVarSuffix)
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
paddle/operators/mean_op.h

@@ -39,10 +39,10 @@ template <typename Place, typename T>
 class MeanGradKernel : public OpKernel {
 public:
   void Compute(const ExecutionContext& context) const override {
-    auto OG = context.Input<Tensor>("Out" + OperatorBase::GRAD_VAR_SUFFIX());
+    auto OG = context.Input<Tensor>("Out" + framework::kGradVarSuffix);
     PADDLE_ENFORCE(framework::product(OG->dims()) == 1,
                    "Mean Gradient should be scalar");
-    auto IG = context.Output<Tensor>("X" + OperatorBase::GRAD_VAR_SUFFIX());
+    auto IG = context.Output<Tensor>("X" + framework::kGradVarSuffix);
     IG->mutable_data<T>(context.GetPlace());

     T ig_size = (T)framework::product(IG->dims());
paddle/operators/softmax_op.cc

@@ -48,12 +48,12 @@ protected:
     PADDLE_ENFORCE(ctx.OutputSize() == 1UL,
                    "Output of SoftmaxOpGrad should be 1");
     PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
-    PADDLE_ENFORCE(ctx.InputVar(GRAD_VAR_NAME("Y")) != nullptr,
+    PADDLE_ENFORCE(ctx.InputVar(framework::GradVarName("Y")) != nullptr,
                    "Input(Y@GRAD) should not be null");
     PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
-                       ctx.Input<Tensor>(GRAD_VAR_NAME("Y"))->dims(),
+                       ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
                    "the shape of Input(0) and Input(1) should be the same");
-    ctx.Output<Tensor>(GRAD_VAR_NAME("X"))
+    ctx.Output<Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("Y")->dims());
   }
 };
paddle/operators/softmax_op.h

@@ -68,8 +68,8 @@ public:
     std::shared_ptr<Tensor> scale_ = std::make_shared<Tensor>();

     auto Y = context.Input<Tensor>("Y");
-    auto dY = context.Input<Tensor>(OperatorBase::GRAD_VAR_NAME("Y"));
-    auto dX = context.Output<Tensor>(OperatorBase::GRAD_VAR_NAME("X"));
+    auto dY = context.Input<Tensor>(framework::GradVarName("Y"));
+    auto dX = context.Output<Tensor>(framework::GradVarName("X"));
     dX->mutable_data<T>(context.GetPlace());

     const int batch_size = Y->dims()[0];