Commit 36492bc5 (unverified), in BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

rename elementwise fmax (#40810)

Authored on Mar 23, 2022 by YuanRisheng; committed via GitHub on Mar 23, 2022.
Parent: 3980e222

Showing 7 changed files with 69 additions and 91 deletions (+69 / -91):
paddle/phi/kernels/cpu/elementwise_grad_kernel.cc   +2  -2
paddle/phi/kernels/cpu/elementwise_kernel.cc        +4  -16
paddle/phi/kernels/elementwise_kernel.h             +10 -10
paddle/phi/kernels/gpu/elementwise_grad_kernel.cu   +2  -2
paddle/phi/kernels/gpu/elementwise_kernel.cu        +4  -16
paddle/phi/kernels/impl/elementwise_kernel_impl.h   +10 -10
paddle/phi/ops/compat/elementwise_sig.cc            +37 -35
paddle/phi/kernels/cpu/elementwise_grad_kernel.cc

@@ -259,7 +259,7 @@ PD_REGISTER_KERNEL(multiply_triple_grad,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}

-PD_REGISTER_KERNEL(elementwise_fmax_grad,
+PD_REGISTER_KERNEL(fmax_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::ElementwiseFMaxGradKernel,
@@ -268,7 +268,7 @@ PD_REGISTER_KERNEL(elementwise_fmax_grad,
                    int,
                    int64_t) {}

-PD_REGISTER_KERNEL(elementwise_fmin_grad,
+PD_REGISTER_KERNEL(fmin_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::ElementwiseFMinGradKernel,
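For orientation, a hedged reading of the registration macro's positional arguments, inferred from the call sites in this diff rather than from the macro's definition:

```cpp
// Inferred from the call sites above (not from the macro's definition):
// PD_REGISTER_KERNEL(kernel_name,  // runtime lookup key, now "fmax_grad"
//                    backend,      // CPU here; GPU in the .cu twin below
//                    layout,       // ALL_LAYOUT = layout-agnostic
//                    kernel_fn,    // e.g. phi::ElementwiseFMaxGradKernel
//                    dtypes...)    // instantiated for each listed dtype
```

Note that only the registered lookup key changes in this commit; the kernel function names in the grad files stay `ElementwiseFMaxGradKernel` / `ElementwiseFMinGradKernel`.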
paddle/phi/kernels/cpu/elementwise_kernel.cc

@@ -87,23 +87,11 @@ using complex128 = ::phi::dtype::complex<double>;
 // NOTE(chenweihang): using bfloat16 will cause redefine with xpu bfloat16
 // using bfloat16 = ::phi::dtype::bfloat16;

-PD_REGISTER_KERNEL(elementwise_fmax,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseFMaxKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
+PD_REGISTER_KERNEL(
+    fmax, CPU, ALL_LAYOUT, phi::FMaxKernel, float, double, int, int64_t) {}

-PD_REGISTER_KERNEL(elementwise_fmin,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseFMinKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
+PD_REGISTER_KERNEL(
+    fmin, CPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

 PD_REGISTER_KERNEL(add_raw,
                    CPU,
paddle/phi/kernels/elementwise_kernel.h

@@ -20,18 +20,18 @@
 namespace phi {

 template <typename T, typename Context>
-void ElementwiseFMaxKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
-                           const DenseTensor& y,
-                           int axis,
-                           DenseTensor* out);
+void FMaxKernel(const Context& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y,
+                int axis,
+                DenseTensor* out);

 template <typename T, typename Context>
-void ElementwiseFMinKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
-                           const DenseTensor& y,
-                           int axis,
-                           DenseTensor* out);
+void FMinKernel(const Context& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y,
+                int axis,
+                DenseTensor* out);

 template <typename T, typename Context>
 void AddRawKernel(const Context& dev_ctx,
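The rename is mechanical and the signature is unchanged, so call sites only swap the function name. A hypothetical call site, not shown in the commit (`dev_ctx`, `x`, and `y` are assumed to be an initialized `phi::CPUContext` and input `DenseTensor`s):

```cpp
// Illustration only; setup of dev_ctx, x, y is assumed.
phi::DenseTensor out;
// before this commit: phi::ElementwiseFMaxKernel<float>(dev_ctx, x, y, -1, &out);
phi::FMaxKernel<float>(dev_ctx, x, y, /*axis=*/-1, &out);
```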
paddle/phi/kernels/gpu/elementwise_grad_kernel.cu

@@ -282,7 +282,7 @@ PD_REGISTER_KERNEL(multiply_triple_grad,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}

-PD_REGISTER_KERNEL(elementwise_fmax_grad,
+PD_REGISTER_KERNEL(fmax_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::ElementwiseFMaxGradKernel,
@@ -291,7 +291,7 @@ PD_REGISTER_KERNEL(elementwise_fmax_grad,
                    int,
                    int64_t) {}

-PD_REGISTER_KERNEL(elementwise_fmin_grad,
+PD_REGISTER_KERNEL(fmin_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::ElementwiseFMinGradKernel,
paddle/phi/kernels/gpu/elementwise_kernel.cu

@@ -57,23 +57,11 @@ using bfloat16 = phi::dtype::bfloat16;
 using complex64 = ::phi::dtype::complex<float>;
 using complex128 = ::phi::dtype::complex<double>;

-PD_REGISTER_KERNEL(elementwise_fmax,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseFMaxKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
+PD_REGISTER_KERNEL(
+    fmax, GPU, ALL_LAYOUT, phi::FMaxKernel, float, double, int, int64_t) {}

-PD_REGISTER_KERNEL(elementwise_fmin,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseFMinKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
+PD_REGISTER_KERNEL(
+    fmin, GPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

 PD_REGISTER_KERNEL(add_raw,
                    GPU,
paddle/phi/kernels/impl/elementwise_kernel_impl.h

@@ -23,22 +23,22 @@
 namespace phi {

 template <typename T, typename Context>
-void ElementwiseFMaxKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
-                           const DenseTensor& y,
-                           int axis,
-                           DenseTensor* out) {
+void FMaxKernel(const Context& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y,
+                int axis,
+                DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   funcs::ElementwiseCompute<funcs::FMaxFunctor<T>, T, T>(
       dev_ctx, x, y, axis, funcs::FMaxFunctor<T>(), out);
 }

 template <typename T, typename Context>
-void ElementwiseFMinKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
-                           const DenseTensor& y,
-                           int axis,
-                           DenseTensor* out) {
+void FMinKernel(const Context& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y,
+                int axis,
+                DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   funcs::ElementwiseCompute<funcs::FMinFunctor<T>, T, T>(
       dev_ctx, x, y, axis, funcs::FMinFunctor<T>(), out);
 }
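The kernel body allocates the output, then delegates the elementwise loop and broadcasting to `funcs::ElementwiseCompute` with an `FMaxFunctor`. As a rough mental model, assuming the functor applies `std::fmax` per element (`std::fmax` returns the non-NaN operand when exactly one input is NaN), here is a standalone sketch without the `axis` broadcasting machinery:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Minimal reference sketch (assumption: funcs::FMaxFunctor<T> behaves like
// std::fmax per element). Broadcasting via `axis` is omitted; inputs must
// have equal length.
template <typename T>
std::vector<T> FMaxReference(const std::vector<T>& x, const std::vector<T>& y) {
  std::vector<T> out(x.size());  // mirrors dev_ctx.template Alloc<T>(out)
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = std::fmax(x[i], y[i]);  // NaN in one operand -> other operand
  }
  return out;
}
```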
paddle/phi/ops/compat/elementwise_sig.cc

@@ -19,25 +19,19 @@ namespace phi {
 KernelSignature ElementwiseAddOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
-  if (ctx.IsDenseTensorInput("X")) {
-    if (axis == -1) {
-      return KernelSignature("add", {"X", "Y"}, {}, {"Out"});
-    }
-    return KernelSignature("add_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  if (axis == -1) {
+    return KernelSignature("add", {"X", "Y"}, {}, {"Out"});
   }
-  return KernelSignature("unregistered", {}, {}, {});
+  return KernelSignature("add_raw", {"X", "Y"}, {"axis"}, {"Out"});
 }

 KernelSignature ElementwiseSubOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
-  if (ctx.IsDenseTensorInput("X")) {
-    if (axis == -1) {
-      return KernelSignature("subtract", {"X", "Y"}, {}, {"Out"});
-    }
-    return KernelSignature("subtract_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  if (axis == -1) {
+    return KernelSignature("subtract", {"X", "Y"}, {}, {"Out"});
   }
-  return KernelSignature("unregistered", {}, {}, {});
+  return KernelSignature("subtract_raw", {"X", "Y"}, {"axis"}, {"Out"});
 }

 KernelSignature ElementwiseMulOpArgumentMapping(
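After this cleanup, the mapping rule is purely axis-driven: the default axis of -1 selects the kernel that infers broadcasting itself, while an explicit axis selects the `_raw` variant that consumes the attribute. A hypothetical condensation of that rule (illustration only, not Paddle API):

```cpp
#include <cassert>
#include <string>

// Hypothetical helper restating the dispatch rule above.
std::string PickAddKernel(int axis) {
  return axis == -1 ? "add" : "add_raw";
}

int main() {
  assert(PickAddKernel(-1) == "add");     // default axis -> "add"
  assert(PickAddKernel(0) == "add_raw");  // explicit axis -> "add_raw"
}
```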
@@ -55,24 +49,18 @@ KernelSignature ElementwiseMulOpArgumentMapping(
 KernelSignature ElementwiseDivOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
-  if (ctx.IsDenseTensorInput("X")) {
-    if (axis == -1) {
-      return KernelSignature("divide", {"X", "Y"}, {}, {"Out"});
-    }
-    return KernelSignature("divide_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  if (axis == -1) {
+    return KernelSignature("divide", {"X", "Y"}, {}, {"Out"});
   }
-  return KernelSignature("unregistered", {}, {}, {});
+  return KernelSignature("divide_raw", {"X", "Y"}, {"axis"}, {"Out"});
 }

 KernelSignature ElementwiseAddGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  if (ctx.IsDenseTensorInput("X")) {
-    return KernelSignature("add_grad",
-                           {"X", "Y", GradVarName("Out")},
-                           {"axis"},
-                           {GradVarName("X"), GradVarName("Y")});
-  }
-  return KernelSignature("unregistered", {}, {}, {});
+  return KernelSignature("add_grad",
+                         {"X", "Y", GradVarName("Out")},
+                         {"axis"},
+                         {GradVarName("X"), GradVarName("Y")});
 }

 KernelSignature ElementwiseAddDoubleGradOpArgumentMapping(
@@ -91,13 +79,10 @@ KernelSignature ElementwiseAddTripleGradOpArgumentMapping(
 KernelSignature ElementwiseSubGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  if (ctx.IsDenseTensorInput("X")) {
-    return KernelSignature("subtract_grad",
-                           {"X", "Y", GradVarName("Out")},
-                           {"axis"},
-                           {GradVarName("X"), GradVarName("Y")});
-  }
-  return KernelSignature("unregistered", {}, {}, {});
+  return KernelSignature("subtract_grad",
+                         {"X", "Y", GradVarName("Out")},
+                         {"axis"},
+                         {GradVarName("X"), GradVarName("Y")});
 }

 KernelSignature ElementwiseSubDoubleGradOpArgumentMapping(
@@ -116,7 +101,7 @@ KernelSignature ElementwiseDivGradOpArgumentMapping(
 KernelSignature ElementwiseFMinGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("elementwise_fmin_grad",
+  return KernelSignature("fmin_grad",
                          {"X", "Y", GradVarName("Out")},
                          {"axis"},
                          {GradVarName("X"), GradVarName("Y")});
@@ -138,9 +123,19 @@ KernelSignature ElementwiseMulGradOpArgumentMapping(
                          {GradVarName("X"), GradVarName("Y")});
 }

+KernelSignature ElementwiseFMaxOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("fmax", {"X", "Y"}, {"axis"}, {"Out"});
+}
+
+KernelSignature ElementwiseFMinOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("fmin", {"X", "Y"}, {"axis"}, {"Out"});
+}
+
 KernelSignature ElementwiseFMaxGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("elementwise_fmax_grad",
+  return KernelSignature("fmax_grad",
                          {"X", "Y", GradVarName("Out")},
                          {"axis"},
                          {GradVarName("X"), GradVarName("Y")});
@@ -179,6 +174,10 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_div_grad_grad, divide_double_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_grad, multiply_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_grad_grad, multiply_double_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_triple_grad, multiply_triple_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax, fmax);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin, fmin);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax_grad, fmax_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin_grad, fmin_grad);

 PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                            phi::ElementwiseAddOpArgumentMapping);
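Conceptually, `PD_REGISTER_BASE_KERNEL_NAME` keeps the legacy fluid op names resolvable after the rename. The sketch below restates the four new entries as a plain lookup table; this table-based resolver is an illustration, not Paddle's actual implementation:

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

// Illustration only: legacy op name -> renamed phi kernel name, mirroring
// the four PD_REGISTER_BASE_KERNEL_NAME entries added by this commit.
int main() {
  const std::unordered_map<std::string, std::string> base_kernel_name{
      {"elementwise_fmax", "fmax"},
      {"elementwise_fmin", "fmin"},
      {"elementwise_fmax_grad", "fmax_grad"},
      {"elementwise_fmin_grad", "fmin_grad"},
  };
  for (const auto& [legacy_op, kernel] : base_kernel_name) {
    std::cout << legacy_op << " -> " << kernel << "\n";
  }
}
```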
@@ -208,9 +207,12 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_mul_grad_grad,
                            phi::ElementwiseMulDoubleGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(elementwise_mul_triple_grad,
                            phi::ElementwiseMulTripleGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(elementwise_fmax,
+                           phi::ElementwiseFMaxOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin,
+                           phi::ElementwiseFMinOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(elementwise_fmax_grad,
                            phi::ElementwiseFMaxGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin_grad,
                            phi::ElementwiseFMinGradOpArgumentMapping);