Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
0560733c
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
0560733c
编写于
8月 02, 2017
作者:
D
dangqingqing
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add sigmoid backward implementation.
上级
f70e8077
变更
4
显示空白变更内容
内联
并排
Showing
4 changed files
with
39 additions
and
4 deletions
+39
-4
paddle/operators/sigmoid_op.cc
paddle/operators/sigmoid_op.cc
+8
-4
paddle/operators/sigmoid_op.cu
paddle/operators/sigmoid_op.cu
+1
-0
paddle/operators/sigmoid_op.h
paddle/operators/sigmoid_op.h
+19
-0
python/paddle/v2/framework/tests/test_sigmoid_op.py
python/paddle/v2/framework/tests/test_sigmoid_op.py
+11
-0
未找到文件。
paddle/operators/sigmoid_op.cc
浏览文件 @
0560733c
...
@@ -37,10 +37,12 @@ public:
...
@@ -37,10 +37,12 @@ public:
class
SigmoidOpGrad
:
public
OperatorWithKernel
{
class
SigmoidOpGrad
:
public
OperatorWithKernel
{
protected:
protected:
void
InferShape
(
const
InferShapeContext
&
ctx
)
const
override
{}
void
InferShape
(
const
InferShapeContext
&
ctx
)
const
override
{
std
::
string
DebugString
()
const
override
{
PADDLE_ENFORCE
(
ctx
.
InputSize
()
==
1
,
LOG
(
INFO
)
<<
"SigmoidGrad"
;
"Sigmoid Gradient Op only have one input"
);
return
""
;
PADDLE_ENFORCE
(
ctx
.
OutputSize
()
==
1
,
"Sigmoid Gradient Op only have one output"
);
ctx
.
Output
<
Tensor
>
(
0
)
->
Resize
(
ctx
.
Input
<
Tensor
>
(
0
)
->
dims
());
}
}
};
};
...
@@ -51,3 +53,5 @@ REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
...
@@ -51,3 +53,5 @@ REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
// Wire the sigmoid forward op to its gradient op, then register the CPU
// float kernels for both. (Macro semantics are project-defined; presumably
// they add entries to Paddle's global op registry — confirm against
// framework/op_registry.)
REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
REGISTER_OP_CPU_KERNEL(sigmoid,
                       ops::SigmoidKernel<ops::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(sigmoid_grad,
                       ops::SigmoidGradKernel<ops::CPUPlace, float>);
paddle/operators/sigmoid_op.cu
浏览文件 @
0560733c
#include "paddle/operators/sigmoid_op.h"

// GPU float-kernel registrations for the sigmoid forward and backward ops,
// mirroring the CPU registrations in sigmoid_op.cc.
REGISTER_OP_GPU_KERNEL(sigmoid,
                       ops::SigmoidKernel<ops::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(sigmoid_grad,
                       ops::SigmoidGradKernel<ops::GPUPlace, float>);
paddle/operators/sigmoid_op.h
浏览文件 @
0560733c
...
@@ -32,5 +32,24 @@ public:
...
@@ -32,5 +32,24 @@ public:
1.0
/
(
1.0
+
(
-
1.0
*
EigenVector
<
T
>::
Flatten
(
*
input
)).
exp
());
1.0
/
(
1.0
+
(
-
1.0
*
EigenVector
<
T
>::
Flatten
(
*
input
)).
exp
());
}
}
};
};
// Backward kernel for sigmoid. Given the forward output Y and the upstream
// gradient dY (input "Y@GRAD"), writes the input gradient
//   dX = dY * Y * (1 - Y)
// into output "X@GRAD". Works on the flattened tensors, so any shape is
// accepted as long as all three tensors have the same element count
// (shape agreement is established by SigmoidOpGrad::InferShape).
template <typename Place, typename T>
class SigmoidGradKernel : public OpKernel {
public:
  void Compute(const ExecutionContext& context) const override {
    // TODO(qingqing): maybe a helper function is needed for the name x@GRAD
    // (fixed typos in the original TODO: "funciton"/"fo").
    auto y_t = context.Input<Tensor>("Y");
    auto dy_t = context.Input<Tensor>("Y@GRAD");
    auto dx_t = context.Output<Tensor>("X@GRAD");

    // Allocate the output buffer on the device this kernel runs on.
    dx_t->mutable_data<T>(context.GetPlace());

    auto dx = EigenVector<T>::Flatten(*dx_t);
    auto y = EigenVector<T>::Flatten(*y_t);
    auto dy = EigenVector<T>::Flatten(*dy_t);
    // Evaluate the Eigen expression on the Place-specific device
    // (CPU or GPU), element-wise over the flattened tensors.
    dx.device(*(context.GetEigenDevice<Place>())) = dy * y * (1. - y);
  }
};
}
// namespace operators
}
// namespace operators
}
// namespace paddle
}
// namespace paddle
python/paddle/v2/framework/tests/test_sigmoid_op.py
浏览文件 @
0560733c
...
@@ -12,5 +12,16 @@ class TestSigmoidOp(unittest.TestCase):
...
@@ -12,5 +12,16 @@ class TestSigmoidOp(unittest.TestCase):
self
.
Y
=
1
/
(
1
+
np
.
exp
(
-
self
.
X
))
self
.
Y
=
1
/
(
1
+
np
.
exp
(
-
self
.
X
))
# NOTE(review): this gradient test is intentionally disabled in this commit.
# Before re-enabling, `print self.dX` (Python 2 syntax) would need to become
# print(self.dX), and the debug print should probably be dropped entirely.
#class TestSigmoidGradOp(unittest.TestCase):
#    __metaclass__ = OpTestMeta
#
#    def setUp(self):
#        self.type = "sigmoid_grad"
#        self.Y = np.random.random((32, 100)).astype("float32")
#        self.dY = np.random.random((32, 100)).astype("float32")
#        self.dX = self.dY * self.Y * (1 - self.Y)
#        print self.dX
#

if __name__ == '__main__':
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录