s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 1b2374ad
Authored September 15, 2017 by zchen0211
Parent: c7dfec11

new prelu with functor
Showing 3 changed files with 62 additions and 24 deletions.
paddle/operators/prelu_op.cc (+10, -5)
paddle/operators/prelu_op.h (+51, -18)
python/paddle/v2/framework/tests/test_prelu_op.py (+1, -1)
paddle/operators/prelu_op.cc

@@ -27,13 +27,14 @@ class PReluOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
     auto *in = ctx.Input<framework::Tensor>("X");
     auto *out = ctx.Output<framework::LoDTensor>("Out");
     out->Resize(in->dims());
   }
 };

-// template <typename AttrType>
+template <typename AttrType>
 class PReluOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   PReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
@@ -43,10 +44,12 @@ class PReluOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(PRelu operator

 The equation is:

-f(x) = alpha * x , for x < 0
-f(x) = x , for x >= 0
+f(x) = alpha * x , for x < 0
+f(x) = x , for x >= 0
 )DOC");
-    AddAttr<float>("alpha", "The scaling factor alpha of prelu.")
+    AddAttr<AttrType>("alpha", "The scaling factor alpha of prelu.")
         .SetDefault(0.0);
   }
 };
@@ -59,6 +62,8 @@ class PReluGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
+                            "Input(Out@GRAD) should not be null");
     auto *X_grad = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
     auto *X = ctx.Input<framework::Tensor>("X");
@@ -72,7 +77,7 @@ class PReluGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;

-REGISTER_OP(prelu, ops::PReluOp, ops::PReluOpMaker, prelu_grad,
+REGISTER_OP(prelu, ops::PReluOp, ops::PReluOpMaker<float>, prelu_grad,
             ops::PReluGradOp);
 REGISTER_OP_CPU_KERNEL(prelu,
                        ops::PReluKernel<paddle::platform::CPUPlace, float>);
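For reference, the equation in the DOC string above can be checked in isolation. The following is a minimal standalone sketch, not part of the commit: the helper name prelu is made up for illustration. It verifies that the branchy definition agrees with the max/min formulation used by the old Eigen kernel and by the Python test further below.

// Standalone sketch; `prelu` is a hypothetical helper, not Paddle code.
#include <algorithm>
#include <cassert>
#include <initializer_list>

float prelu(float x, float alpha) {
  // f(x) = alpha * x for x < 0, f(x) = x for x >= 0
  return x >= 0 ? x : alpha * x;
}

int main() {
  const float alpha = 0.1f;
  for (float x : {-2.0f, -0.5f, 0.0f, 3.0f}) {
    // Same identity the test file uses: max(x, 0) + min(x, 0) * alpha
    assert(prelu(x, alpha) == std::max(x, 0.0f) + std::min(x, 0.0f) * alpha);
  }
  return 0;
}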
paddle/operators/prelu_op.h

@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once

 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/platform/transform.h"

 namespace paddle {
 namespace operators {
@@ -23,28 +24,60 @@ using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+using platform::Transform;

-template <typename Place, typename T>
+template <typename T>
+class Prelu_functor {
+ public:
+  explicit Prelu_functor(const T& alpha) : alpha_(alpha) {}
+
+  HOSTDEVICE T operator()(const T& X) const {
+    if (X > 0)
+      return X;
+    else
+      return X * alpha_;
+  }
+
+ private:
+  T alpha_;
+};
+
+template <typename Place, typename T, typename AttrType = T>
 class PReluKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* X = context.Input<Tensor>("X");
     auto* Out = context.Output<Tensor>("Out");
-    Out->mutable_data<T>(context.GetPlace());

-    auto alpha = static_cast<T>(context.Attr<float>("alpha"));
+    const T* X_ptr = X->data<T>();
+    T* O_ptr = Out->mutable_data<T>(context.GetPlace());

-    auto X_vec = EigenVector<T>::Flatten(*X);
-    auto Out_vec = EigenVector<T>::Flatten(*Out);
+    auto alpha = static_cast<T>(context.Attr<AttrType>("alpha"));

-    // auto place = context.GetEigenDevice<Place>();
-    // Out_vec.device(place)
-    Out_vec = X_vec.cwiseMax(0.f) + X_vec.cwiseMin(0.f) * alpha;
+    int numel = X->numel();
+
+    auto place = context.GetPlace();
+    Transform(place, X_ptr, X_ptr + numel, O_ptr, Prelu_functor<T>(alpha));
   }
 };

-template <typename Place, typename T>
+template <typename T>
+class Prelu_Grad_functor {
+ public:
+  explicit Prelu_Grad_functor(const T& alpha) : alpha_(alpha) {}
+
+  HOSTDEVICE T operator()(const T& Out, const T& dOut) const {
+    if (Out > 0)
+      return dOut;
+    else
+      return dOut * alpha_;
+  }
+
+ private:
+  T alpha_;
+};
+
+template <typename Place, typename T, typename AttrType = T>
 class PReluGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -53,16 +86,16 @@ class PReluGradKernel : public framework::OpKernel {
     auto* Out = context.Input<Tensor>("Out");

-    auto alpha = static_cast<T>(context.Attr<float>("alpha"));
+    auto alpha = static_cast<T>(context.Attr<AttrType>("alpha"));

-    dX->mutable_data<T>(context.GetPlace());
-    for (int i = 0; i < dX->numel(); ++i) {
-      if (Out->data<T>()[i] > 0) {
-        dX->data<T>()[i] = dO->data<T>()[i];
-      } else {
-        dX->data<T>()[i] = dO->data<T>()[i] * alpha;
-      }
-    }
+    T* dX_ptr = dX->mutable_data<T>(context.GetPlace());
+    const T* dO_ptr = dO->data<T>();
+    const T* O_ptr = Out->data<T>();
+
+    int numel = dX->numel();
+
+    auto place = context.GetPlace();
+    Transform(place, O_ptr, O_ptr + numel, dO_ptr, dX_ptr,
+              Prelu_Grad_functor<T>(alpha));
   }
 };
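The refactor above replaces explicit elementwise code with HOSTDEVICE functors handed to platform::Transform, which applies them over the element range for whatever place the kernel runs on. Below is a minimal sketch of the same pattern using std::transform as a CPU-only stand-in for platform::Transform; the functor name PreluGradFunctor and the sample values are made up for illustration.

// CPU-only sketch; std::transform stands in for platform::Transform, and
// PreluGradFunctor is a hypothetical mirror of Prelu_Grad_functor above.
#include <algorithm>
#include <cstdio>
#include <vector>

template <typename T>
struct PreluGradFunctor {
  explicit PreluGradFunctor(const T& alpha) : alpha_(alpha) {}
  // Same rule as Prelu_Grad_functor: pass dOut through where Out > 0,
  // otherwise scale it by alpha.
  T operator()(const T& out, const T& dout) const {
    return out > 0 ? dout : dout * alpha_;
  }

 private:
  T alpha_;
};

int main() {
  std::vector<float> out = {-0.2f, 0.0f, 3.0f};  // forward outputs
  std::vector<float> dout = {1.0f, 1.0f, 1.0f};  // incoming gradient
  std::vector<float> dx(out.size());
  // Binary form, mirroring Transform(place, O_ptr, O_ptr + numel, dO_ptr,
  // dX_ptr, Prelu_Grad_functor<T>(alpha)) in the kernel above.
  std::transform(out.begin(), out.end(), dout.begin(), dx.begin(),
                 PreluGradFunctor<float>(0.1f));
  for (float g : dx) std::printf("%g\n", g);  // prints 0.1, 0.1, 1
  return 0;
}

Since the functor body is marked HOSTDEVICE, presumably the point of the change is that the same logic can compile for both the CPU and CUDA kernels instead of being written twice.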
python/paddle/v2/framework/tests/test_prelu_op.py

@@ -6,7 +6,7 @@ from op_test import OpTest
 class PreluTest(OpTest):
     def setUp(self):
         self.op_type = "prelu"
-        self.inputs = {'X': np.random.normal(size=(3, 5)).astype("float32")}
+        self.inputs = {'X': np.random.normal(size=(10, 10)).astype("float32")}
         self.attrs = {'alpha': 0.1}
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'], 0.) * self.attrs['alpha']