Commit ec09ef26 (unverified) in Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Authored Mar 12, 2022 by Chen Weihang; committed via GitHub on Mar 12, 2022.

[Phi] Add softmax infermeta functions (#40471)

* rename softmax kernel name
* move softmax infershape
* fix failed test

Parent: 76f87034
Showing 11 changed files with 58 additions and 67 deletions (+58 / -67).
paddle/fluid/operators/mkldnn/test_mkldnn_op_inplace.cc  +3 -0
paddle/fluid/operators/softmax_op.cc                     +12 -43
paddle/phi/infermeta/backward.cc                         +6 -0
paddle/phi/infermeta/backward.h                          +2 -0
paddle/phi/infermeta/unary.cc                            +19 -0
paddle/phi/infermeta/unary.h                             +2 -0
paddle/phi/kernels/cpu/softmax_kernel.cc                 +1 -1
paddle/phi/kernels/gpu/softmax_kernel.cu                 +1 -1
paddle/phi/kernels/gpudnn/softmax_kernel.cu              +7 -7
paddle/phi/kernels/impl/softmax_kernel_impl.h            +4 -4
paddle/phi/kernels/softmax_kernel.h                      +1 -11
paddle/fluid/operators/mkldnn/test_mkldnn_op_inplace.cc

```diff
@@ -24,6 +24,7 @@
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/phi/core/kernel_registry.h"

 USE_OP_ITSELF(elementwise_add);
 USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
@@ -32,6 +33,8 @@ USE_OP_DEVICE_KERNEL(relu, MKLDNN);
 USE_OP_ITSELF(softmax);
 USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
+PD_DECLARE_KERNEL(softmax, CPU, ALL_LAYOUT);
+
 namespace paddle {
 namespace operators {
```
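Context on the test change: the softmax CPU kernel now lives in phi and is registered in a different translation unit, so the test declares it explicitly, presumably to keep the kernel registration visible at link time. As a hedged illustration of the pattern (the op name `my_op` below is hypothetical, not part of this commit), a test that exercises a phi-backed operator typically pairs the fluid-side `USE_OP_ITSELF` with a phi-side `PD_DECLARE_KERNEL`:

```cpp
// Sketch only: how a test might reference a phi-backed operator.
// "my_op" is a hypothetical operator name used for illustration.
#include "paddle/fluid/framework/op_registry.h"  // USE_OP_ITSELF / USE_OP_DEVICE_KERNEL
#include "paddle/phi/core/kernel_registry.h"     // PD_DECLARE_KERNEL

USE_OP_ITSELF(my_op);                       // pull in the operator definition
PD_DECLARE_KERNEL(my_op, CPU, ALL_LAYOUT);  // declare the phi CPU kernel registered elsewhere
```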
paddle/fluid/operators/softmax_op.cc

```diff
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <string>
 #include <unordered_map>

+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
@@ -23,6 +24,10 @@ limitations under the License. */
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #endif

+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/backward.h"
+#include "paddle/phi/infermeta/unary.h"
+
 namespace paddle {
 namespace operators {
@@ -30,30 +35,6 @@ class SoftmaxOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput("X"), true,
-        platform::errors::NotFound("Input(X) of SoftmaxOp is not found."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("Out"), true,
-        platform::errors::NotFound("Output(Out) of SoftmaxOp is not found."));
-    auto dim_x = ctx->GetInputDim("X");
-    auto rank_x = dim_x.size();
-    auto axis = ctx->Attrs().Get<int>("axis");
-    PADDLE_ENFORCE_GE(axis, -rank_x,
-                      platform::errors::InvalidArgument(
-                          "Attr(axis) value should be in range [-R, R-1], "
-                          "R is the rank of Input(X)."));
-    PADDLE_ENFORCE_LT(axis, rank_x,
-                      platform::errors::InvalidArgument(
-                          "Attr(axis) value should be in range [-R, R-1], "
-                          "R is the rank of Input(X)."));
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Out");
-  }
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
@@ -168,23 +149,6 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Out"), true,
-                      platform::errors::InvalidArgument(
-                          "Input(Out) is not found."));
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      platform::errors::InvalidArgument(
-                          "Input(Out@GRAD) is not found."));
-    PADDLE_ENFORCE_EQ(
-        ctx->GetInputDim("Out"),
-        ctx->GetInputDim(framework::GradVarName("Out")),
-        platform::errors::InvalidArgument("Input(Out) and its gradients "
-                                          "should have a same shape."));
-    ctx->SetOutputDim(framework::GradVarName("X"),
-                      ctx->GetInputDim(framework::GradVarName("Out")));
-  }
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
@@ -244,9 +208,14 @@ DECLARE_INPLACE_OP_INFERER(SoftmaxInplaceInferer, {"X", "Out"});
 namespace ops = paddle::operators;

+DECLARE_INFER_SHAPE_FUNCTOR(softmax, SoftmaxInferShapeFunctor,
+                            PD_INFER_META(phi::SoftmaxInferMeta));
 REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                   ops::SoftmaxOpInferVarType,
                   ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                   ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
-                  ops::SoftmaxInplaceInferer);
-REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad);
+                  ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor);
+DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor,
+                            PD_INFER_META(phi::GeneralUnaryGradInferMeta));
+REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad,
+                  SoftmaxGradnferShapeFunctor);
```
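The registration change above is the general recipe this commit follows for moving shape inference into phi: delete the op's hand-written `InferShape` override, wrap a phi InferMeta function with `PD_INFER_META` inside `DECLARE_INFER_SHAPE_FUNCTOR`, and pass the resulting functor to `REGISTER_OPERATOR`. A minimal, hypothetical sketch of the same wiring for some other unary operator (`my_unary`, `MyUnaryInferMeta`, `MyUnaryOp`, and `MyUnaryOpMaker` are illustrative names, not part of this commit; the op class and maker are assumed to exist elsewhere):

```cpp
// Sketch only: the wiring pattern used by this commit, applied to a hypothetical op.
#include "paddle/fluid/framework/infershape_utils.h"  // DECLARE_INFER_SHAPE_FUNCTOR
#include "paddle/fluid/framework/op_registry.h"       // REGISTER_OPERATOR
#include "paddle/phi/core/infermeta_utils.h"          // PD_INFER_META
#include "paddle/phi/core/meta_tensor.h"              // phi::MetaTensor

namespace phi {
// The InferMeta function replaces the op's InferShape override: it derives the
// output's dims/dtype/LoD from the inputs alone, with no framework context.
void MyUnaryInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->share_lod(x);
}
}  // namespace phi

namespace ops = paddle::operators;

// Wrap the InferMeta function in a functor and hand it to the registration,
// exactly as the softmax registration above does with phi::SoftmaxInferMeta.
DECLARE_INFER_SHAPE_FUNCTOR(my_unary, MyUnaryInferShapeFunctor,
                            PD_INFER_META(phi::MyUnaryInferMeta));
REGISTER_OPERATOR(my_unary, ops::MyUnaryOp, ops::MyUnaryOpMaker,
                  MyUnaryInferShapeFunctor);
```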
paddle/phi/infermeta/backward.cc

```diff
@@ -64,6 +64,12 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
+
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
                                 MetaTensor* dx,
```
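`share_meta` copies the input's metadata wholesale, so this one helper covers any unary gradient whose output metadata mirrors a forward-side tensor, which is why the softmax_grad registration above can reuse it. A rough, hedged expansion of what it amounts to for a dense tensor (the exact set of fields `share_meta` propagates is an assumption here, not spelled out by this diff):

```cpp
// Sketch only: an approximate hand-written equivalent of GeneralUnaryGradInferMeta.
// Whether share_meta also propagates layout or other fields is an assumption.
#include "paddle/phi/core/meta_tensor.h"

void GeneralUnaryGradInferMetaSketch(const phi::MetaTensor& x, phi::MetaTensor* dx) {
  if (dx == nullptr) return;  // the gradient output may be optional
  dx->set_dims(x.dims());     // same shape as the forward-side tensor
  dx->set_dtype(x.dtype());   // same data type
  dx->share_lod(x);           // same LoD information
}
```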
paddle/phi/infermeta/backward.h

```diff
@@ -30,6 +30,8 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
                                 MetaTensor* dx,
```
paddle/phi/infermeta/unary.cc

```diff
@@ -1409,6 +1409,25 @@ void ShardIndexInferMeta(const MetaTensor& in,
   out->set_dtype(in.dtype());
 }

+void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
+  auto dim_x = x.dims();
+  auto rank_x = dim_x.size();
+  PADDLE_ENFORCE_GE(axis,
+                    -rank_x,
+                    phi::errors::InvalidArgument(
+                        "Attr(axis) value should be in range [-R, R-1], "
+                        "R is the rank of Input(X)."));
+  PADDLE_ENFORCE_LT(axis,
+                    rank_x,
+                    phi::errors::InvalidArgument(
+                        "Attr(axis) value should be in range [-R, R-1], "
+                        "R is the rank of Input(X)."));
+
+  out->set_dims(x.dims());
+  out->set_dtype(x.dtype());
+  out->share_lod(x);
+}
+
 }  // namespace phi

 PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
```
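The contract encoded here is simple: softmax never changes shape, dtype, or LoD, and the `axis` attribute must name a real dimension of the input. As a self-contained illustration of the axis rule (plain C++, not Paddle API), for a rank-R input a valid axis lies in [-R, R-1]:

```cpp
// Standalone illustration of the range check performed by SoftmaxInferMeta.
#include <cassert>
#include <cstdio>

bool axis_is_valid(int axis, int rank) { return axis >= -rank && axis < rank; }

int main() {
  const int rank = 3;               // e.g. a tensor of shape [2, 3, 4]
  assert(axis_is_valid(-1, rank));  // -1 means the last dimension
  assert(axis_is_valid(2, rank));   // largest valid positive axis is R-1
  assert(!axis_is_valid(3, rank));  // out of range: rejected by PADDLE_ENFORCE_LT
  assert(!axis_is_valid(-4, rank)); // out of range: rejected by PADDLE_ENFORCE_GE
  std::printf("axis rule holds for rank %d\n", rank);
  return 0;
}
```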
paddle/phi/infermeta/unary.h

```diff
@@ -203,4 +203,6 @@ void ShardIndexInferMeta(const MetaTensor& in,
                          MetaTensor* out,
                          MetaConfig config = MetaConfig());

+void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);
+
 }  // namespace phi
```
paddle/phi/kernels/cpu/softmax_kernel.cc

```diff
@@ -19,4 +19,4 @@ limitations under the License. */
 #include "paddle/phi/kernels/impl/softmax_kernel_impl.h"

 PD_REGISTER_KERNEL(
-    softmax, CPU, ALL_LAYOUT, phi::SoftmaxRawKernel, float, double) {}
+    softmax, CPU, ALL_LAYOUT, phi::SoftmaxKernel, float, double) {}
```
paddle/phi/kernels/gpu/softmax_kernel.cu

```diff
@@ -23,7 +23,7 @@ limitations under the License. */
 PD_REGISTER_KERNEL(softmax,
                    GPU,
                    ALL_LAYOUT,
-                   phi::SoftmaxRawKernel,
+                   phi::SoftmaxKernel,
                    float,
                    double,
                    phi::dtype::float16,
```
paddle/phi/kernels/gpudnn/softmax_kernel.cu

```diff
@@ -21,10 +21,10 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void SoftmaxRawGPUDNNKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
-                            int axis,
-                            DenseTensor* out) {
+void SoftmaxGPUDNNKernel(const Context& dev_ctx,
+                         const DenseTensor& x,
+                         int axis,
+                         DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   SoftmaxForwardCUDAKernelDriver<T>(dev_ctx, x, axis, out);
 }
@@ -35,7 +35,7 @@ void SoftmaxRawGPUDNNKernel(const Context& dev_ctx,
 PD_REGISTER_KERNEL(softmax,
                    GPUDNN,
                    ALL_LAYOUT,
-                   phi::SoftmaxRawGPUDNNKernel,
+                   phi::SoftmaxGPUDNNKernel,
                    float,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
@@ -44,7 +44,7 @@ PD_REGISTER_KERNEL(softmax,
 PD_REGISTER_KERNEL(softmax,
                    GPUDNN,
                    ALL_LAYOUT,
-                   phi::SoftmaxRawGPUDNNKernel,
+                   phi::SoftmaxGPUDNNKernel,
                    float,
                    double,
                    phi::dtype::float16,
@@ -53,7 +53,7 @@ PD_REGISTER_KERNEL(softmax,
 PD_REGISTER_KERNEL(softmax,
                    GPUDNN,
                    ALL_LAYOUT,
-                   phi::SoftmaxRawGPUDNNKernel,
+                   phi::SoftmaxGPUDNNKernel,
                    float,
                    double,
                    phi::dtype::float16) {}
```
paddle/phi/kernels/impl/softmax_kernel_impl.h

```diff
@@ -22,10 +22,10 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void SoftmaxRawKernel(const Context& dev_ctx,
-                      const DenseTensor& x,
-                      int axis,
-                      DenseTensor* out) {
+void SoftmaxKernel(const Context& dev_ctx,
+                   const DenseTensor& x,
+                   int axis,
+                   DenseTensor* out) {
   const int rank = x.dims().size();
   const int calc_axis = phi::funcs::CanonicalAxis(axis, rank);
   int axis_dim = x.dims()[calc_axis];
```
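`CanonicalAxis` maps a possibly negative axis onto a non-negative index before the kernel reads the dimension size along that axis. A self-contained sketch of that normalization (plain C++, not the Paddle implementation itself):

```cpp
// Standalone sketch of axis canonicalization as used above:
// a negative axis counts from the end, so -1 on a rank-R tensor means R-1.
#include <cstdio>

int canonical_axis(int axis, int rank) {
  // Assumes axis was already validated to lie in [-R, R-1] by the InferMeta checks.
  return axis < 0 ? axis + rank : axis;
}

int main() {
  std::printf("%d\n", canonical_axis(-1, 4));  // prints 3: the last dimension
  std::printf("%d\n", canonical_axis(2, 4));   // prints 2: unchanged
  return 0;
}
```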
paddle/phi/kernels/softmax_kernel.h

```diff
@@ -19,20 +19,10 @@ limitations under the License. */
 namespace phi {

 template <typename T, typename Context>
-void SoftmaxRawKernel(const Context& dev_ctx,
-                      const DenseTensor& x,
-                      int axis,
-                      DenseTensor* out);
-
-template <typename T, typename Context>
 void SoftmaxKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    int axis,
-                   DataType dtype,
-                   DenseTensor* out) {
-  auto cast_x = phi::Cast<T, Context>(dev_ctx, x, dtype);
-  phi::SoftmaxRawKernel<T, Context>(dev_ctx, axis, out);
-}
+                   DenseTensor* out);

 }  // namespace phi
```
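The public header now exposes a single `SoftmaxKernel` without the `DataType dtype` parameter; the implicit cast-then-softmax behaviour is gone. A hedged sketch of how a call site could adapt (the helper name and its include paths are assumptions; whether any in-tree caller actually relied on the removed dtype overload is not shown by this diff):

```cpp
// Sketch only: adapting a hypothetical call site to the new SoftmaxKernel signature.
// "RunSoftmaxWithCast" is an illustrative helper, not part of this commit; the
// cast_kernel.h include path for the functional phi::Cast is an assumption.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/softmax_kernel.h"

template <typename T, typename Context>
void RunSoftmaxWithCast(const Context& dev_ctx,
                        const phi::DenseTensor& x,
                        int axis,
                        phi::DataType dtype,
                        phi::DenseTensor* out) {
  // Previously the dtype handling lived inside SoftmaxKernel; now the caller
  // casts explicitly (if needed) and then calls the dtype-free kernel.
  auto cast_x = phi::Cast<T, Context>(dev_ctx, x, dtype);
  phi::SoftmaxKernel<T, Context>(dev_ctx, cast_x, axis, out);
}
```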