Commit a8c6ce9b
Author: Yu Yang
Date: Sep 28, 2017
Parents: 337b7ebe, 9fbf94b6

Merge branch 'develop' of github.com:baidu/Paddle into feature/BetterActivationKern
Showing 15 changed files with 290 additions and 73 deletions (+290 −73):
- paddle/framework/CMakeLists.txt (+1 −1)
- paddle/framework/grad_op_builder.cc (+39 −0)
- paddle/framework/grad_op_builder.h (+3 −0)
- paddle/framework/grad_op_builder_test.cc (+79 −0)
- paddle/framework/op_desc.cc (+11 −0)
- paddle/framework/op_desc.h (+6 −0)
- paddle/operators/activation_op.cc (+14 −0)
- paddle/operators/activation_op.h (+22 −1)
- paddle/pybind/pybind.cc (+7 −9)
- paddle/pybind/tensor_py.h (+14 −1)
- python/paddle/v2/framework/tests/op_test.py (+60 −42)
- python/paddle/v2/framework/tests/test_activation_op.py (+17 −0)
- python/paddle/v2/framework/tests/test_cross_entropy_op.py (+1 −1)
- python/paddle/v2/framework/tests/test_elementwise_mul_op.py (+15 −17)
- python/paddle/v2/framework/tests/test_prelu_op.py (+1 −1)
paddle/framework/CMakeLists.txt

```diff
@@ -26,7 +26,7 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
 cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope)
 cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
-cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator)
+cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc)
 cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
 cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
```
paddle/framework/grad_op_builder.cc

```diff
@@ -54,5 +54,44 @@ OperatorBase* BuildGradOp(const OperatorBase* op) {
   return grad_info.Creator()(info.grad_op_type_, inputs, outputs, op->Attrs());
 }

+static void TransOpDescArg(const OpDescBind* src_op, const OpArgType& src_type,
+                           bool is_grad, OpDescBind* dst_op,
+                           const OpArgType& dst_type) {
+  PADDLE_ENFORCE(dst_op != nullptr,
+                 "Protobuf desc of gradient op must be initialized first.");
+  const auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto();
+  const auto& src_arg_list =
+      src_type == OpArgType::IN ? proto.inputs() : proto.outputs();
+  for (const auto& arg : src_arg_list) {
+    if (arg.not_in_gradient() && !is_grad) continue;
+    const std::string src_name = arg.name();
+    std::vector<std::string> vars = src_type == OpArgType::IN
+                                        ? src_op->Input(src_name)
+                                        : src_op->Output(src_name);
+    if (is_grad) {
+      for (std::string& var : vars) {
+        var = GradVarName(var);
+      }
+    }
+    std::string dst_name = is_grad ? GradVarName(src_name) : src_name;
+    dst_type == OpArgType::IN ? dst_op->SetInput(dst_name, vars)
+                              : dst_op->SetOutput(dst_name, vars);
+  }
+}
+
+void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op) {
+  auto& info = OpInfoMap::Instance().Get(forw_op->Type());
+  PADDLE_ENFORCE(info.HasGradientOp());
+
+  grad_op->SetType(info.grad_op_type_);
+
+  TransOpDescArg(forw_op, OpArgType::IN, false, grad_op, OpArgType::IN);
+  TransOpDescArg(forw_op, OpArgType::OUT, false, grad_op, OpArgType::IN);
+  TransOpDescArg(forw_op, OpArgType::OUT, true, grad_op, OpArgType::IN);
+  TransOpDescArg(forw_op, OpArgType::IN, true, grad_op, OpArgType::OUT);
+
+  grad_op->SetAttrMap(forw_op->GetAttrMap());
+}
+
 }  // namespace framework
 }  // namespace paddle
```
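For orientation, the argument mapping performed by the four TransOpDescArg calls can be sketched in a few lines of standalone Python. This is an illustration only, not part of the commit; `grad_var_name` and its "@GRAD" suffix are assumptions standing in for the C++ `GradVarName`.

```python
def grad_var_name(name):
    # Stand-in for framework::GradVarName; the "@GRAD" suffix is an assumption.
    return name + "@GRAD"


def complete_grad_op_desc(fwd_inputs, fwd_outputs):
    # The gradient op reads the forward inputs, the forward outputs, and the
    # gradients of the forward outputs; it writes gradients of the forward inputs.
    grad_inputs = dict(fwd_inputs)                                   # (IN,  plain) -> IN
    grad_inputs.update(fwd_outputs)                                  # (OUT, plain) -> IN
    grad_inputs.update({grad_var_name(k): [grad_var_name(v) for v in vs]
                        for k, vs in fwd_outputs.items()})           # (OUT, grad)  -> IN
    grad_outputs = {grad_var_name(k): [grad_var_name(v) for v in vs]
                    for k, vs in fwd_inputs.items()}                 # (IN,  grad)  -> OUT
    return grad_inputs, grad_outputs


ins, outs = complete_grad_op_desc({"In1": ["in1"]}, {"Out1": ["out1"]})
print(ins)   # {'In1': ['in1'], 'Out1': ['out1'], 'Out1@GRAD': ['out1@GRAD']}
print(outs)  # {'In1@GRAD': ['in1@GRAD']}
```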
paddle/framework/grad_op_builder.h

```diff
@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once

+#include "paddle/framework/op_desc.h"
 #include "paddle/framework/operator.h"

 namespace paddle {
@@ -21,5 +22,7 @@ namespace framework {
 OperatorBase* BuildGradOp(const OperatorBase* op);

+void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op);
+
 }  // namespace framework
 }  // namespace paddle
```
paddle/framework/grad_op_builder_test.cc

```diff
@@ -120,3 +120,82 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
             std::vector<std::string>(
                 {f::GradVarName("in3_1"), f::GradVarName("in3_2")}));
 }
+
+TEST(GradOpDescBuilder, MutiInOut) {
+  f::OpDescBind *forw_op = new f::OpDescBind();
+  forw_op->SetType("mult_io");
+  forw_op->SetInput("In1", {"in1"});
+  forw_op->SetInput("In2_mult", {"in2_1", "in2_2", "in2_3"});
+  forw_op->SetInput("In3", {"in3"});
+  forw_op->SetOutput("Out1", {"out1"});
+  forw_op->SetOutput("Out2_mult", {"out2_1", "out2_2"});
+
+  f::OpDescBind *grad_op = new f::OpDescBind();
+  f::CompleteGradOpDesc(forw_op, grad_op);
+
+  EXPECT_EQ(grad_op->Type(), "mult_io_grad");
+  ASSERT_EQ(grad_op->InputNames().size(), 3UL + 2UL + 2UL);
+  EXPECT_EQ(grad_op->Input("In1"), std::vector<std::string>({"in1"}));
+  EXPECT_EQ(grad_op->Input("In2_mult"),
+            std::vector<std::string>({"in2_1", "in2_2", "in2_3"}));
+  EXPECT_EQ(grad_op->Input("In3"), std::vector<std::string>({"in3"}));
+  EXPECT_EQ(grad_op->Input("Out1"), std::vector<std::string>({"out1"}));
+  EXPECT_EQ(grad_op->Input("Out2_mult"),
+            std::vector<std::string>({"out2_1", "out2_2"}));
+  EXPECT_EQ(grad_op->Input(f::GradVarName("Out1")),
+            std::vector<std::string>({f::GradVarName("out1")}));
+  EXPECT_EQ(grad_op->Input(f::GradVarName("Out2_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
+  ASSERT_EQ(grad_op->OutputNames().size(), 3UL);
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In1")),
+            std::vector<std::string>({f::GradVarName("in1")}));
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")),
+            std::vector<std::string>({f::GradVarName("in2_1"),
+                                      f::GradVarName("in2_2"),
+                                      f::GradVarName("in2_3")}));
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In3")),
+            std::vector<std::string>({f::GradVarName("in3")}));
+  delete forw_op;
+  delete grad_op;
+}
+
+TEST(GradOpDescBuilder, IOIgnoredInGradient) {
+  f::OpDescBind *forw_op = new f::OpDescBind();
+  forw_op->SetType("io_ignored");
+  forw_op->SetInput("In1", {"in1"});
+  forw_op->SetInput("In2_mult", {"in2_1", "in2_2"});
+  forw_op->SetInput("In3_mult", {"in3_1", "in3_2"});
+  forw_op->SetOutput("Out1_mult", {"out1_1", "out1_2"});
+  forw_op->SetOutput("Out2", {"out2"});
+
+  f::OpDescBind *grad_op = new f::OpDescBind();
+  f::CompleteGradOpDesc(forw_op, grad_op);
+
+  EXPECT_EQ(grad_op->Type(), "io_ignored_grad");
+  // 'In2' and 'Out2' are ignored in gradient calculating
+  ASSERT_EQ(grad_op->InputNames().size(), 2UL + 1UL + 2UL);
+  EXPECT_EQ(grad_op->Input("In1"), std::vector<std::string>({"in1"}));
+  EXPECT_EQ(grad_op->Input("In3_mult"),
+            std::vector<std::string>({"in3_1", "in3_2"}));
+  EXPECT_EQ(grad_op->Input("Out1_mult"),
+            std::vector<std::string>({"out1_1", "out1_2"}));
+  EXPECT_EQ(grad_op->Input(f::GradVarName("Out1_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("out1_1"), f::GradVarName("out1_2")}));
+  EXPECT_EQ(grad_op->Input(f::GradVarName("Out2")),
+            std::vector<std::string>({f::GradVarName("out2")}));
+  ASSERT_EQ(grad_op->OutputNames().size(), 3UL);
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In1")),
+            std::vector<std::string>({f::GradVarName("in1")}));
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("in2_1"), f::GradVarName("in2_2")}));
+  EXPECT_EQ(grad_op->Output(f::GradVarName("In3_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("in3_1"), f::GradVarName("in3_2")}));
+  delete forw_op;
+  delete grad_op;
+}
\ No newline at end of file
```
paddle/framework/op_desc.cc

```diff
@@ -89,6 +89,12 @@ void OpDescBind::SetAttr(const std::string &name, const Attribute &v) {
   need_update_ = true;
 }

+void OpDescBind::SetAttrMap(
+    const std::unordered_map<std::string, Attribute> &attr_map) {
+  attrs_ = attr_map;
+  need_update_ = true;
+}
+
 Attribute OpDescBind::GetAttr(const std::string &name) const {
   auto it = attrs_.find(name);
   PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name);
@@ -101,6 +107,11 @@ int OpDescBind::GetBlockAttr(const std::string &name) const {
   return boost::get<BlockDesc *>(it->second)->idx();
 }

+const std::unordered_map<std::string, Attribute> &OpDescBind::GetAttrMap()
+    const {
+  return attrs_;
+}
+
 void OpDescBind::Sync() {
   if (need_update_) {
     this->op_desc_.mutable_inputs()->Clear();
```
paddle/framework/op_desc.h

```diff
@@ -60,10 +60,16 @@ class OpDescBind {
   void SetBlockAttr(const std::string &name, BlockDescBind &block);

+  // Only be used in C++
+  void SetAttrMap(const std::unordered_map<std::string, Attribute> &attr_map);
+
   Attribute GetAttr(const std::string &name) const;

   int GetBlockAttr(const std::string &name) const;

+  // Only be used in C++
+  const std::unordered_map<std::string, Attribute> &GetAttrMap() const;
+
  private:
   struct SetAttrDescVisitor : public boost::static_visitor<void> {
     explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {}
```
paddle/operators/activation_op.cc

```diff
@@ -132,6 +132,17 @@ class SquareOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };

+class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  SoftsignOpMaker(framework::OpProto *proto,
+                  framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of Softsign operator");
+    AddOutput("Y", "Output of Softsign operator");
+    AddComment("Softsign activation operator, softsign(x) = x / (1 + |x|)");
+  }
+};
+
 template <typename AttrType>
 class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -223,6 +234,9 @@ REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad,
 REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad,
             ops::ActivationOpGrad);

+REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad,
+            ops::ActivationOpGrad);
+
 REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker<float>, brelu_grad,
             ops::ActivationOpGrad);
```
paddle/operators/activation_op.h

```diff
@@ -262,6 +262,26 @@ struct BReluGradFunctor : public BaseActivationFunctor<T> {
   }
 };

+// softsign(x) = x / (1 + |x|)
+template <typename T>
+struct SoftsignFunctor : public BaseActivationFunctor<T> {
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) {
+    y.device(d) = x / (static_cast<T>(1) + x.abs());
+  }
+};
+
+// d(softsign(x))/dx = 1 / (1 + |x|)^2
+// Taken from https://en.wikipedia.org/wiki/Activation_function
+template <typename T>
+struct SoftsignGradFunctor : public BaseActivationFunctor<T> {
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) {
+    dx.device(d) =
+        dy * (static_cast<T>(1) / (static_cast<T>(1) + x.abs()).square());
+  }
+};
+
 template <typename T>
 struct SoftReluFunctor : public BaseActivationFunctor<T> {
   float threshold;
@@ -358,4 +378,5 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
   __macro(brelu, BReluFunctor, BReluGradFunctor);           \
   __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \
   __macro(pow, PowFunctor, PowGradFunctor);                 \
-  __macro(stanh, STanhFunctor, STanhGradFunctor)
+  __macro(stanh, STanhFunctor, STanhGradFunctor);           \
+  __macro(softsign, SoftsignFunctor, SoftsignGradFunctor)
```
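As a quick sanity check on the softsign math (an illustration, not part of the commit), the analytic gradient 1 / (1 + |x|)^2 can be compared in NumPy against the same central difference the Python gradient checker uses:

```python
import numpy as np


def softsign(x):
    return x / (1.0 + np.abs(x))


x = np.linspace(-1.0, 1.0, 9)
delta = 0.005

analytic = 1.0 / (1.0 + np.abs(x)) ** 2
numeric = (softsign(x + delta) - softsign(x - delta)) / (2 * delta)

# Central differencing is accurate to O(delta^2) away from x = 0.
assert np.allclose(analytic, numeric, atol=1e-2)
```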
paddle/pybind/pybind.cc

```diff
@@ -77,20 +77,18 @@ PYBIND11_PLUGIN(core) {
       })
       .def("set", PyCPUTensorSetFromArray<float>)
       .def("set", PyCPUTensorSetFromArray<int>)
+      .def("set", PyCPUTensorSetFromArray<double>)
 #ifndef PADDLE_ONLY_CPU
       .def("set", PyCUDATensorSetFromArray<float>)
       .def("set", PyCUDATensorSetFromArray<int>)
+      .def("set", PyCUDATensorSetFromArray<double>)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-      .def("set_float_element",
-           [](Tensor &self, size_t offset, float f) {
-             // TODO(yuyang18): Only support GPU now.
-             self.data<float>()[offset] = f;
-           })
-      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
-        // TODO(yuyang18): Only support GPU now.
-        return self.data<float>()[offset];
-      });
+      .def("set_float_element", TensorSetElement<float>)
+      .def("get_float_element", TensorGetElement<float>)
+      .def("set_double_element", TensorSetElement<double>)
+      .def("get_double_element", TensorGetElement<double>)
+      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });

   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
```
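A minimal usage sketch of the new bindings from Python. The import path and the `core.Scope()` constructor are assumptions inferred from the test harness (op_test.py, below); the accessor and dtype methods are the ones this diff adds.

```python
import numpy as np
import paddle.v2.framework.core as core

scope = core.Scope()  # assumed constructor, following op_test.py conventions
tensor = scope.new_var("x").get_tensor()
tensor.set_dims([2, 2])
tensor.set(np.ones((2, 2), dtype=np.float64), core.CPUPlace())

assert tensor.dtype() == core.DataType.FP64  # dtype is queryable now
tensor.set_double_element(0, 3.14)           # double accessors added by this commit
assert tensor.get_double_element(0) == 3.14
```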
paddle/pybind/tensor_py.h

```diff
@@ -73,10 +73,23 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
 };
 }  // namespace details
 inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
   auto buffer_info =
-      details::CastToPyBufferImpl<true, 0, float, int>()(tensor);
+      details::CastToPyBufferImpl<true, 0, float, int, double>()(tensor);
   return buffer_info;
 }

+template <typename T>
+T TensorGetElement(framework::Tensor &self, size_t offset) {
+  PADDLE_ENFORCE(platform::is_cpu_place(self.place()));
+  return self.data<T>()[offset];
+}
+
+template <typename T>
+void TensorSetElement(framework::Tensor &self, size_t offset, T elem) {
+  PADDLE_ENFORCE(platform::is_cpu_place(self.place()));
+  self.data<T>()[offset] = elem;
+}
+
 template <typename T>
 void PyCPUTensorSetFromArray(
     framework::Tensor &self,
```
python/paddle/v2/framework/tests/op_test.py

```diff
@@ -12,17 +12,19 @@ def grad_var_name(var_name):
 def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()

+    def __create_var__(name, var_name):
+        scope.new_var(var_name)
+        kwargs[name].append(var_name)
+
     for in_name, in_dup in Operator.get_op_inputs(op_type):
         if in_name in inputs:
             kwargs[in_name] = []
             if in_dup:
                 sub_in = inputs[in_name]
                 for sub_in_name, _ in sub_in:
-                    var = scope.new_var(sub_in_name)
-                    kwargs[in_name].append(sub_in_name)
+                    __create_var__(in_name, sub_in_name)
             else:
-                var = scope.new_var(in_name)
-                kwargs[in_name].append(in_name)
+                __create_var__(in_name, in_name)

     for out_name, out_dup in Operator.get_op_outputs(op_type):
         if out_name in outputs:
@@ -30,11 +32,9 @@ def create_op(scope, op_type, inputs, outputs, attrs):
             if out_dup:
                 sub_out = outputs[out_name]
                 for sub_out_name, _ in sub_out:
-                    var = scope.new_var(sub_out_name)
-                    kwargs[out_name].append(sub_out_name)
+                    __create_var__(out_name, sub_out_name)
             else:
-                var = scope.new_var(out_name)
-                kwargs[out_name].append(out_name)
+                __create_var__(out_name, out_name)

     for attr_name in Operator.get_op_attr_names(op_type):
         if attr_name in attrs:
@@ -44,49 +44,46 @@ def create_op(scope, op_type, inputs, outputs, attrs):
 def set_input(scope, op, inputs, place):
+    def __set_input__(var_name, var):
+        tensor = scope.find_var(var_name).get_tensor()
+        if isinstance(var, tuple):
+            tensor.set_lod(var[1])
+            var = var[0]
+        tensor.set_dims(var.shape)
+        tensor.set(var, place)
+
     for in_name, in_dup in Operator.get_op_inputs(op.type()):
         if in_name in inputs:
             if in_dup:
                 sub_in = inputs[in_name]
                 for sub_in_name, sub_in_val in sub_in:
-                    var = scope.find_var(sub_in_name)
-                    tensor = var.get_tensor()
-                    sub_in_array = sub_in_val[0] \
-                        if isinstance(sub_in_val, tuple) else sub_in_val
-                    tensor.set_dims(sub_in_array.shape)
-                    tensor.set(sub_in_array, place)
-                    if isinstance(sub_in_val, tuple):
-                        tensor.set_lod(sub_in_val[1])
+                    __set_input__(sub_in_name, sub_in_val)
             else:
-                var = scope.find_var(in_name)
-                tensor = var.get_tensor()
-                in_val = inputs[in_name]
-                in_array = in_val[0] if isinstance(in_val, tuple) else in_val
-                tensor.set_dims(in_array.shape)
-                tensor.set(in_array, place)
-                if isinstance(in_val, tuple):
-                    tensor.set_lod(in_val[1])
+                __set_input__(in_name, inputs[in_name])


 def set_output_grad(scope, op, outputs, place):
+    def __set_tensor__(name):
+        out_tensor = scope.find_var(name).get_tensor()
+        grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
+        out_dtype = out_tensor.dtype()
+        if out_dtype == core.DataType.FP64:
+            data = np.ones(out_tensor.shape(), dtype=np.float64)
+        elif out_dtype == core.DataType.FP32:
+            data = np.ones(out_tensor.shape(), dtype=np.float32)
+        else:
+            raise ValueError("Not supported data type " + str(out_dtype))
+
+        grad_tensor.set(data, place)
+
     for out_name, out_dup in Operator.get_op_outputs(op.type()):
         if out_name in outputs:
             if out_dup:
                 sub_out = outputs[out_name]
                 for sub_out_name, _ in sub_out:
-                    out_tensor = scope.find_var(sub_out_name).get_tensor()
-                    grad_tensor = scope.new_var(grad_var_name(
-                        sub_out_name)).get_tensor()
-                    grad_tensor.set_dims(out_tensor.shape())
-                    data = np.ones(out_tensor.shape(), dtype=np.float32)
-                    grad_tensor.set(data, place)
+                    __set_tensor__(sub_out_name)
             else:
-                out_tensor = scope.find_var(out_name).get_tensor()
-                grad_tensor = scope.new_var(grad_var_name(out_name)).get_tensor(
-                )
-                grad_tensor.set_dims(out_tensor.shape())
-                data = np.ones(out_tensor.shape(), dtype=np.float32)
-                grad_tensor.set(data, place)
+                __set_tensor__(out_name)
@@ -96,7 +93,6 @@ def get_numeric_gradient(scope,
                          output_names,
                          delta=0.005,
                          in_place=False):
     set_input(scope, op, inputs, core.CPUPlace())
-    tensor_to_check = scope.find_var(input_to_check).get_tensor()
@@ -115,7 +111,29 @@ def get_numeric_gradient(scope,
     tensor_to_check = scope.find_var(input_to_check).get_tensor()

     tensor_size = product(tensor_to_check.get_dims())
-    gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32')
+    tensor_to_check_dtype = tensor_to_check.dtype()
+    if tensor_to_check_dtype == core.DataType.FP32:
+        tensor_to_check_dtype = np.float32
+    elif tensor_to_check_dtype == core.DataType.FP64:
+        tensor_to_check_dtype = np.float64
+    else:
+        raise ValueError("Not supported data type " +
+                         str(tensor_to_check_dtype))
+
+    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)
+
+    def __get_elem__(tensor, i):
+        if tensor_to_check_dtype == np.float32:
+            return tensor.get_float_element(i)
+        else:
+            return tensor.get_double_element(i)
+
+    def __set_elem__(tensor, i, e):
+        if tensor_to_check_dtype == np.float32:
+            tensor.set_float_element(i, e)
+        else:
+            tensor.set_double_element(i, e)

     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.
     for i in xrange(tensor_size):
@@ -123,20 +141,20 @@ def get_numeric_gradient(scope,
         set_input(scope, op, inputs, core.CPUPlace())

         # get one input element throw it's index i.
-        origin = tensor_to_check.get_float_element(i)
+        origin = __get_elem__(tensor_to_check, i)

         # add delta to it, run op and then get the sum of the result tensor.
         x_pos = origin + delta
-        tensor_to_check.set_float_element(i, x_pos)
+        __set_elem__(tensor_to_check, i, x_pos)
         y_pos = get_output()

         if in_place:
             set_input(scope, op, inputs, core.CPUPlace())

         x_neg = origin - delta
-        tensor_to_check.set_float_element(i, x_neg)
+        __set_elem__(tensor_to_check, i, x_neg)
         y_neg = get_output()

-        tensor_to_check.set_float_element(i, origin)
+        __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2

     return gradient_flat.reshape(tensor_to_check.get_dims())
```
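The dtype dispatch matters because the central difference (y(x+δ) − y(x−δ)) / (2δ) loses several significant digits to rounding when evaluated in single precision. A standalone illustration (not part of the test suite):

```python
import numpy as np


def numeric_grad(f, x, delta=0.005):
    # Central difference, as in get_numeric_gradient above.
    return (f(x + delta) - f(x - delta)) / (2 * delta)


f = lambda x: x * x  # exact gradient: 2x
x32 = np.array([1.0], dtype=np.float32)
x64 = np.array([1.0], dtype=np.float64)

err32 = abs(numeric_grad(f, x32) - 2 * x32)
err64 = abs(numeric_grad(f, x64) - 2 * x64)
print(err32, err64)  # float64 is typically orders of magnitude more accurate
```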
python/paddle/v2/framework/tests/test_activation_op.py

```diff
@@ -219,5 +219,22 @@ class TestSTanh(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)


+class TestSoftsign(OpTest):
+    def setUp(self):
+        self.op_type = "softsign"
+        self.inputs = {
+            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {
+            'Y': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
 if __name__ == "__main__":
     unittest.main()
```
python/paddle/v2/framework/tests/test_cross_entropy_op.py

```diff
@@ -80,7 +80,7 @@ class TestCrossEntropyOp3(OpTest):
         cross_entropy2 = (-label * np.log(X)).sum(
             axis=1, keepdims=True).astype("float32")

-        self.inputs = {"X": X, "Label": label}
+        self.inputs = {"X": X, "Label": label.astype(np.float32)}
         self.outputs = {"Y": cross_entropy}
         self.attrs = {"softLabel": True}
```
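The one-line cast is needed because NumPy builds the soft-label array in float64 by default, which no longer matches the float32 input X once the tensor `set` method dispatches on dtype. A short illustration of the soft-label cross entropy computed by this test (shapes are illustrative):

```python
import numpy as np

batch_size, class_num = 2, 3
X = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float32")
label = np.random.uniform(0.1, 1.0, [batch_size, class_num])
label /= label.sum(axis=1, keepdims=True)  # rows sum to 1: a soft label

print(label.dtype)  # float64 -- NumPy's default, hence the explicit cast
cross_entropy = (-label * np.log(X)).sum(axis=1, keepdims=True).astype("float32")
print(cross_entropy.shape)  # (2, 1)
```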
python/paddle/v2/framework/tests/test_elementwise_mul_op.py

```diff
@@ -7,8 +7,8 @@ class ElementwiseMulOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         }
         self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
@@ -16,23 +16,21 @@ class ElementwiseMulOp(OpTest):
         self.check_output()

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)
+        self.check_grad(['X', 'Y'], 'Out')

     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))
+        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))

     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))


 class TestElementwiseMulOp_Vector(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.random((32, )).astype("float32"),
-            'Y': np.random.random((32, )).astype("float32")
+            'X': np.random.random((32, )).astype("float64"),
+            'Y': np.random.random((32, )).astype("float64")
         }
         self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
@@ -41,8 +39,8 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.rand(2, 3, 4).astype(np.float32),
-            'Y': np.random.rand(2).astype(np.float32)
+            'X': np.random.rand(2, 3, 4).astype(np.float64),
+            'Y': np.random.rand(2).astype(np.float64)
         }

         self.attrs = {'axis': 0}
@@ -55,8 +53,8 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.rand(2, 3, 4).astype(np.float32),
-            'Y': np.random.rand(3).astype(np.float32)
+            'X': np.random.rand(2, 3, 4).astype(np.float64),
+            'Y': np.random.rand(3).astype(np.float64)
         }

         self.attrs = {'axis': 1}
@@ -69,8 +67,8 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.rand(2, 3, 4).astype(np.float32),
-            'Y': np.random.rand(4).astype(np.float32)
+            'X': np.random.rand(2, 3, 4).astype(np.float64),
+            'Y': np.random.rand(4).astype(np.float64)
         }

         self.outputs = {
@@ -82,8 +80,8 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
     def setUp(self):
         self.op_type = "elementwise_mul"
         self.inputs = {
-            'X': np.random.rand(2, 3, 4, 5).astype(np.float32),
-            'Y': np.random.rand(3, 4).astype(np.float32)
+            'X': np.random.rand(2, 3, 4, 5).astype(np.float64),
+            'Y': np.random.rand(3, 4).astype(np.float64)
         }

         self.attrs = {'axis': 1}
```
python/paddle/v2/framework/tests/test_prelu_op.py

```diff
@@ -17,7 +17,7 @@ class PReluTest(OpTest):
         x_np_sign = np.sign(x_np)
         x_np = x_np_sign * np.maximum(x_np, .005)
-        alpha_np = np.array([.1])
+        alpha_np = np.array([.1], dtype="float32")
         self.inputs = {'X': x_np, 'Alpha': alpha_np}
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
```
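Same dtype story here: `np.array([.1])` silently produces float64, mismatching the float32 'X' input. A quick check:

```python
import numpy as np

print(np.array([.1]).dtype)                   # float64 (NumPy's default)
print(np.array([.1], dtype="float32").dtype)  # float32, matching 'X'
```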