Commit 874cac0c
Committed on December 26, 2017
Author: fengjiayi
Change softmax
Parent: f8391545
Showing 4 changed files with 20 additions and 20 deletions (+20, -20)
paddle/operators/softmax_op.cc                     +11  -11
paddle/operators/softmax_op.h                       +6   -6
python/paddle/v2/fluid/layer_helper.py              +1   -1
python/paddle/v2/fluid/tests/test_softmax_op.py     +2   -2
paddle/operators/softmax_op.cc

@@ -24,13 +24,13 @@ class SoftmaxOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"),
-                   "Output(Y) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of SoftmaxOp should not be null.");
 
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2UL,
                    "The input of softmax op must be a matrix.");
-    ctx->SetOutputDim("Y", x_dims);
+    ctx->SetOutputDim("Out", x_dims);
   }
 };
@@ -41,7 +41,7 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X",
              "The input tensor of softmax. "
              "2-D with shape [batch_size, input_feature_dimensions].");
-    AddOutput("Y", "The normalized values with the same shape as X.");
+    AddOutput("Out", "The normalized values with the same shape as X.");
     AddComment(R"DOC(
 Softmax Operator.
@@ -59,7 +59,7 @@ exponential values of all the other dimensions is the output of the softmax
 operator.
 
 For each row $i$ and each column $j$ in Input(X), we have:
-    $$Y[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
+    $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
 
 )DOC");
   }
@@ -70,12 +70,12 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Y"),
-                      ctx->GetInputDim(framework::GradVarName("Y")),
-                      "Input(Y) and its gradients should have a same shape.");
+    PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) should be not null.");
+    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
+                      ctx->GetInputDim(framework::GradVarName("Out")),
+                      "Input(Out) and its gradients should have a same shape.");
 
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
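A side note on the DOC string in the hunk above: as written in the source, the formula drops a closing parenthesis and reuses $j$ as the summation index. Reading it the way softmax is normally defined, the renamed output is presumably

$$Out[i, j] = \frac{\exp(X[i, j])}{\sum_{k} \exp(X[i, k])}$$

i.e. each row of X is exponentiated and normalized so that its entries sum to one; this commit only changes the output's name from Y to Out, not its semantics.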
paddle/operators/softmax_op.h

@@ -26,13 +26,13 @@ class SoftmaxKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* X = context.Input<Tensor>("X");
-    auto* Y = context.Output<Tensor>("Y");
+    auto* Out = context.Output<Tensor>("Out");
 
     // allocate memory on device.
-    Y->mutable_data<T>(context.GetPlace());
+    Out->mutable_data<T>(context.GetPlace());
 
     math::SoftmaxFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), X, Y);
+        context.template device_context<DeviceContext>(), X, Out);
   }
 };
@@ -40,15 +40,15 @@ template <typename DeviceContext, typename T>
 class SoftmaxGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* Y = context.Input<Tensor>("Y");
-    auto* dY = context.Input<Tensor>(framework::GradVarName("Y"));
+    auto* Out = context.Input<Tensor>("Out");
+    auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
 
     // allocate memory on device.
     dX->mutable_data<T>(context.GetPlace());
 
     math::SoftmaxGradFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), Y, dY, dX);
+        context.template device_context<DeviceContext>(), Out, dOut, dX);
   }
 };
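For reference, math::SoftmaxGradFunctor (implemented elsewhere in Paddle's math library and not shown in this diff) is handed the forward output and its gradient, now named Out and dOut. A minimal NumPy sketch of the quantity such a functor has to produce, assuming the standard softmax Jacobian-vector product, is:

import numpy as np

def softmax_grad(out, dout):
    """Reference softmax backward pass (sketch, not Paddle's code).

    out  : forward softmax output, shape [batch, features]
    dout : gradient w.r.t. the output, same shape
    Returns the gradient w.r.t. the input X.
    """
    # dX[i, j] = Out[i, j] * (dOut[i, j] - sum_k dOut[i, k] * Out[i, k])
    dot = np.sum(dout * out, axis=1, keepdims=True)
    return out * (dout - dot)

This is also why the grad kernel only reads Out and dOut, never the original input X: the softmax gradient is expressible entirely in terms of the forward output.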
python/paddle/v2/fluid/layer_helper.py

@@ -184,7 +184,7 @@ class LayerHelper(object):
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
-            outputs={"Y": [tmp]},
+            outputs={"Out": [tmp]},
             attrs=act)
         return tmp
python/paddle/v2/fluid/tests/test_softmax_op.py

@@ -17,14 +17,14 @@ class TestSoftmaxOp(OpTest):
             'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
         }
         self.outputs = {
-            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+            'Out': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
         }
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(['X'], 'Out')
 
 
 if __name__ == "__main__":
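The expected outputs in this test come from a stable_softmax helper applied row by row; its definition is not part of the diff. Assuming it is the usual numerically stable formulation, it would look roughly like:

import numpy as np

def stable_softmax(x):
    """Row-wise softmax of a 1-D vector, computed stably (sketch).

    Subtracting the maximum before exponentiating prevents overflow
    and does not change the result.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)

Applied via np.apply_along_axis(stable_softmax, 1, self.inputs['X']), each of the 10 rows is normalized independently, matching the operator's per-row definition; the only change in this file is that the expected tensor and the gradient check now refer to the output key 'Out'.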