Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
s920243400
PaddleDetection
提交
874cac0c
P
PaddleDetection
项目概览
s920243400
/
PaddleDetection
与 Fork 源项目一致
Fork自
PaddlePaddle / PaddleDetection
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
874cac0c
编写于
12月 26, 2017
作者:
F
fengjiayi
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Change softmax
上级
f8391545
变更
4
隐藏空白更改
内联
并排
Showing
4 changed files
with
20 additions
and
20 deletions
+20
-20
paddle/operators/softmax_op.cc
paddle/operators/softmax_op.cc
+11
-11
paddle/operators/softmax_op.h
paddle/operators/softmax_op.h
+6
-6
python/paddle/v2/fluid/layer_helper.py
python/paddle/v2/fluid/layer_helper.py
+1
-1
python/paddle/v2/fluid/tests/test_softmax_op.py
python/paddle/v2/fluid/tests/test_softmax_op.py
+2
-2
未找到文件。
paddle/operators/softmax_op.cc
浏览文件 @
874cac0c
...
...
@@ -24,13 +24,13 @@ class SoftmaxOp : public framework::OperatorWithKernel {
// Shape inference for the softmax forward op.
// Validates that Input(X) and Output(Out) are wired up, requires X to be a
// 2-D matrix [batch_size, input_feature_dimensions], and gives Out the same
// shape as X (softmax is a row-wise normalizing map, so shapes are preserved).
// NOTE(review): the scraped diff interleaved the pre-rename ("Y") and
// post-rename ("Out") lines; this is the post-commit ("Out") version.
void InferShape(framework::InferShapeContext *ctx) const override {
  PADDLE_ENFORCE(ctx->HasInput("X"),
                 "Input(X) of SoftmaxOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Out"),
                 "Output(Out) of SoftmaxOp should not be null.");
  auto x_dims = ctx->GetInputDim("X");
  // This operator only supports matrix input: one softmax per row.
  PADDLE_ENFORCE(x_dims.size() == 2UL,
                 "The input of softmax op must be a matrix.");
  // Output shape equals input shape.
  ctx->SetOutputDim("Out", x_dims);
}
};
...
...
@@ -41,7 +41,7 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput
(
"X"
,
"The input tensor of softmax. "
"2-D with shape [batch_size, input_feature_dimensions]."
);
AddOutput
(
"
Y
"
,
"The normalized values with the same shape as X."
);
AddOutput
(
"
Out
"
,
"The normalized values with the same shape as X."
);
AddComment
(
R"DOC(
Softmax Operator.
...
...
@@ -59,7 +59,7 @@ exponential values of all the other dimensions is the output of the softmax
operator.
For each row $i$ and each column $j$ in Input(X), we have:
$$
Y
[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
$$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}$$
)DOC"
);
}
...
...
@@ -70,12 +70,12 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
using
framework
::
OperatorWithKernel
::
OperatorWithKernel
;
// Shape inference for the softmax gradient op.
// Requires the forward output Out and the incoming gradient Out@GRAD to be
// present and to agree in shape, then shapes X@GRAD like the forward input X.
// NOTE(review): reconstructed from a diff scrape that contained both the
// pre-rename ("Y") and post-rename ("Out") checks; only the post-commit
// ("Out") checks are kept.
void InferShape(framework::InferShapeContext *ctx) const override {
  PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
  PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                 "Input(Out@GRAD) should be not null.");
  // The gradient flowing into Out must be shaped exactly like Out itself.
  PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
                    ctx->GetInputDim(framework::GradVarName("Out")),
                    "Input(Out) and its gradients should have a same shape.");
  // dX mirrors the shape of the forward input X.
  ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
...
...
paddle/operators/softmax_op.h
浏览文件 @
874cac0c
...
...
@@ -26,13 +26,13 @@ class SoftmaxKernel : public framework::OpKernel<T> {
public:
// Forward kernel: computes row-wise softmax of Input(X) into Output(Out).
// NOTE(review): the diff scrape duplicated the output binding and the functor
// call (one "Y" variant, one "Out" variant); this keeps only the post-commit
// "Out" variant.
void Compute(const framework::ExecutionContext &context) const override {
  auto *X = context.Input<Tensor>("X");
  auto *Out = context.Output<Tensor>("Out");

  // allocate memory on device.
  Out->mutable_data<T>(context.GetPlace());

  // Delegate the math to the shared softmax functor on this kernel's
  // device context (works for both CPU and GPU instantiations).
  math::SoftmaxFunctor<DeviceContext, T>()(
      context.template device_context<DeviceContext>(), X, Out);
}
};
...
...
@@ -40,15 +40,15 @@ template <typename DeviceContext, typename T>
class
SoftmaxGradKernel
:
public
framework
::
OpKernel
<
T
>
{
public:
// Backward kernel: given the forward result Out and the upstream gradient
// Out@GRAD, computes the gradient w.r.t. the input into X@GRAD.
// Only the forward OUTPUT is consumed here — the functor needs Out, not X.
// NOTE(review): reconstructed post-commit ("Out") variant; the scrape also
// carried the superseded "Y"/"dY" bindings and a duplicated functor tail.
void Compute(const framework::ExecutionContext &context) const override {
  auto *Out = context.Input<Tensor>("Out");
  auto *dOut = context.Input<Tensor>(framework::GradVarName("Out"));
  auto *dX = context.Output<Tensor>(framework::GradVarName("X"));

  // allocate memory on device.
  dX->mutable_data<T>(context.GetPlace());

  // Delegate the gradient computation to the shared functor.
  math::SoftmaxGradFunctor<DeviceContext, T>()(
      context.template device_context<DeviceContext>(), Out, dOut, dX);
}
};
...
...
python/paddle/v2/fluid/layer_helper.py
浏览文件 @
874cac0c
...
...
@@ -184,7 +184,7 @@ class LayerHelper(object):
self
.
append_op
(
type
=
act_type
,
inputs
=
{
"X"
:
[
input_var
]},
outputs
=
{
"
Y
"
:
[
tmp
]},
outputs
=
{
"
Out
"
:
[
tmp
]},
attrs
=
act
)
return
tmp
...
...
python/paddle/v2/fluid/tests/test_softmax_op.py
浏览文件 @
874cac0c
...
...
@@ -17,14 +17,14 @@ class TestSoftmaxOp(OpTest):
'X'
:
np
.
random
.
uniform
(
0.1
,
1
,
[
10
,
10
]).
astype
(
"float32"
)
}
self
.
outputs
=
{
'
Y
'
:
np
.
apply_along_axis
(
stable_softmax
,
1
,
self
.
inputs
[
'X'
])
'
Out
'
:
np
.
apply_along_axis
(
stable_softmax
,
1
,
self
.
inputs
[
'X'
])
}
def test_check_output(self):
    # Forward check: compare the operator's output against the numpy
    # reference stored in self.outputs by setUp.
    self.check_output()
def test_check_grad(self):
    # Gradient check of output 'Out' w.r.t. input 'X' via numeric
    # differentiation. The scrape contained both the old ('Y') and new
    # ('Out') calls; this keeps the post-commit 'Out' call only.
    self.check_grad(['X'], 'Out')
if
__name__
==
"__main__"
:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录