Commit ad96fe2c (unverified)

rename mod c api name (#45476)

Authored by Chen Weihang on Aug 29, 2022; committed via GitHub on Aug 30, 2022.
Parent: 66c8ada0

Showing 11 changed files with 68 additions and 68 deletions (+68 -68).
paddle/phi/api/yaml/legacy_api.yaml              +9  -9
paddle/phi/api/yaml/legacy_backward.yaml         +0  -11
paddle/phi/kernels/cpu/elementwise_kernel.cc     +11 -11
paddle/phi/kernels/elementwise_kernel.cc         +21 -9
paddle/phi/kernels/elementwise_kernel.h          +12 -12
paddle/phi/kernels/funcs/elementwise_functor.h   +5  -5
paddle/phi/kernels/kps/elementwise_kernel.cu     +4  -4
paddle/phi/ops/compat/elementwise_sig.cc         +3  -3
python/paddle/fluid/dygraph/math_op_patch.py     +1  -1
python/paddle/fluid/layers/nn.py                 +1  -1
python/paddle/tensor/math.py                     +1  -2
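For orientation: at the Python level this rename sits behind paddle.remainder and the eager-mode % operator (rebound in math_op_patch.py below). A minimal sketch, assuming a Paddle build that already contains this commit:

# Minimal sketch; assumes a Paddle build that includes this commit.
import paddle

x = paddle.to_tensor([-7.0, 7.0, -7.0])
y = paddle.to_tensor([3.0, -3.0, -3.0])

print((x % y).numpy())                 # '__mod__' dispatches to 'remainder' in eager mode
print(paddle.remainder(x, y).numpy())  # public API backed by the same kernel
# Both print [ 2. -2. -1.]: the result takes the sign of the divisor y.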
paddle/phi/api/yaml/legacy_api.yaml

@@ -1798,15 +1798,6 @@
     func : mode
   backward : mode_grad
 
-- api : modulo
-  args : (Tensor x, Tensor y)
-  output : Tensor
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : modulo
-  backward : modulo_grad
-
 - api : momentum_
   args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision = false, float rescale_grad = 1.0f)
   output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)

@@ -2128,6 +2119,15 @@
     func : relu6
   backward : relu6_grad
 
+- api : remainder
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : ElementwiseInferMeta
+  kernel :
+    func : remainder
+  inplace : (x -> out)
+
 - api : renorm
   args : (Tensor x, float p, int axis, float max_norm)
   output : Tensor
paddle/phi/api/yaml/legacy_backward.yaml

@@ -1576,17 +1576,6 @@
   kernel :
     func : mode_grad
 
-- backward_api : modulo_grad
-  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : GeneralBinaryGradInferMeta
-    param : [x, y]
-  kernel :
-    func : modulo_grad
-  no_need_buffer : x, y
-
 - backward_api : multi_dot_grad
   forward : multi_dot (Tensor[] x) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad)
paddle/phi/kernels/cpu/elementwise_kernel.cc

@@ -46,21 +46,21 @@ void MinimumRawKernel(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-void ModuloRawKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
-                     int axis,
-                     DenseTensor* out) {
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out) {
   // allocate memory for out
   dev_ctx.template Alloc<T>(out);
   auto x_dims = x.dims();
   auto y_dims = y.dims();
   if (x_dims.size() >= y_dims.size()) {
-    funcs::ElementwiseCompute<funcs::ModuloFunctor<T>, T>(
-        dev_ctx, x, y, axis, funcs::ModuloFunctor<T>(), out);
+    funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::RemainderFunctor<T>(), out);
   } else {
-    funcs::ElementwiseCompute<funcs::InverseModuloFunctor<T>, T>(
-        dev_ctx, x, y, axis, funcs::InverseModuloFunctor<T>(), out);
+    funcs::ElementwiseCompute<funcs::InverseRemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::InverseRemainderFunctor<T>(), out);
   }
 }

@@ -139,10 +139,10 @@ PD_REGISTER_KERNEL(minimum_raw,
                    int,
                    int64_t,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(modulo_raw,
+PD_REGISTER_KERNEL(remainder_raw,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ModuloRawKernel,
+                   phi::RemainderRawKernel,
                    float,
                    double,
                    int,
paddle/phi/kernels/elementwise_kernel.cc

@@ -38,12 +38,12 @@ void MinimumKernel(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-void ModuloKernel(const Context& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* out) {
+void RemainderKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out) {
   int axis = -1;
-  ModuloRawKernel<T>(dev_ctx, x, y, axis, out);
+  RemainderRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>

@@ -96,8 +96,14 @@ PD_REGISTER_KERNEL(minimum,
                    int,
                    int64_t,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(
-    modulo, CPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
+PD_REGISTER_KERNEL(remainder,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::RemainderKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
 PD_REGISTER_KERNEL(
     floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
 PD_REGISTER_KERNEL(elementwise_heaviside,

@@ -139,8 +145,14 @@ PD_REGISTER_KERNEL(minimum,
                    int64_t,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(
-    modulo, GPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
+PD_REGISTER_KERNEL(remainder,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::RemainderKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
 PD_REGISTER_KERNEL(
     floor_divide, KPS, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
 PD_REGISTER_KERNEL(elementwise_heaviside,
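The PD_REGISTER_KERNEL calls above bind phi::RemainderKernel for float, double, int and int64_t on the CPU and GPU backends. A small sketch exercising that dtype coverage from Python (assuming a build with this commit; not an exhaustive check):

import paddle

for dtype in ("float32", "float64", "int32", "int64"):
    x = paddle.to_tensor([7, -7, 9], dtype=dtype)
    y = paddle.to_tensor([3, 3, -4], dtype=dtype)
    # Each dtype is handled by the registered RemainderKernel; expected values: 1, 2, -3.
    print(dtype, paddle.remainder(x, y).numpy())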
paddle/phi/kernels/elementwise_kernel.h

@@ -60,18 +60,18 @@ void MinimumKernel(const Context& dev_ctx,
                    DenseTensor* out);
 
 template <typename T, typename Context>
-void ModuloRawKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
-                     int axis,
-                     DenseTensor* out);
-
-template <typename T, typename Context>
-void ModuloKernel(const Context& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* out);
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out);
+
+template <typename T, typename Context>
+void RemainderKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out);
 
 template <typename T, typename Context>
 void FloorDivideRawKernel(const Context& dev_ctx,
                           const DenseTensor& x,

@@ -134,13 +134,13 @@ DenseTensor Minimum(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-DenseTensor Modulo(const Context& dev_ctx,
-                   const DenseTensor& x,
-                   const DenseTensor& y) {
+DenseTensor Remainder(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
   ElementwiseInferMeta(x, y, &meta_out);
-  ModuloKernel<T, Context>(dev_ctx, x, y, &dense_out);
+  RemainderKernel<T, Context>(dev_ctx, x, y, &dense_out);
   return dense_out;
 }
paddle/phi/kernels/funcs/elementwise_functor.h

@@ -21,7 +21,7 @@ limitations under the License. */
 #if defined(__xpu__)
 #include <xpu/runtime.h>
-#include "xpu/kernel/math_xpu2.h"  //pow()
+#include "xpu/kernel/math_xpu2.h"  // pow()
 #endif
 
 namespace phi {

@@ -499,7 +499,7 @@ struct MinGradXYFunctor {
 
 // Modulo
 template <typename T, typename Enable = void>
-struct ModuloFunctor {
+struct RemainderFunctor {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
     T res = a % b;

@@ -511,7 +511,7 @@ struct ModuloFunctor {
 };
 
 template <typename T>
-struct ModuloFunctor<
+struct RemainderFunctor<
     T,
     typename std::enable_if_t<std::is_floating_point<T>::value>> {
   inline HOSTDEVICE T operator()(const T a, const T b) const {

@@ -525,7 +525,7 @@ struct ModuloFunctor<
 };
 
 template <typename T, typename Enable = void>
-struct InverseModuloFunctor {
+struct InverseRemainderFunctor {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
     T res = b % a;
     if ((res != 0) && ((res < 0) != (a < 0))) res += a;

@@ -534,7 +534,7 @@ struct InverseModuloFunctor {
 };
 
 template <typename T>
-struct InverseModuloFunctor<
+struct InverseRemainderFunctor<
     T,
     typename std::enable_if_t<std::is_floating_point<T>::value>> {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
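On integers these functors build Python-style modulo on top of C++'s truncating %: take the truncated remainder, then shift it by the divisor when their signs disagree (the adjustment is visible in InverseRemainderFunctor above, which receives its operands swapped). A pure-Python sketch of that logic, written in (dividend, divisor) order for readability; the function names here are illustrative only:

import math

def truncated_mod(a, b):
    # C/C++ integer '%' truncates toward zero, so the result keeps the dividend's sign.
    return int(math.fmod(a, b))

def remainder_like_functor(a, b):
    # Same adjustment as the integer functor: if the truncated remainder and the
    # divisor disagree in sign, shift the result by the divisor.
    res = truncated_mod(a, b)
    if res != 0 and (res < 0) != (b < 0):
        res += b
    return res

# Matches Python's floor-style '%' for every sign combination.
for a, b in [(7, 3), (-7, 3), (7, -3), (-7, -3)]:
    assert remainder_like_functor(a, b) == a % b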
paddle/phi/kernels/kps/elementwise_kernel.cu

@@ -42,8 +42,8 @@ void MinimumKernel(const Context& dev_ctx,
   int axis = -1;
   MinimumRawKernel<T>(dev_ctx, x, y, axis, out);
 }
-// Create the definition of Modulo
-DEFINE_CUDA_ELEMENTWISE_OP(Modulo)
+// Create the definition of Remainder
+DEFINE_CUDA_ELEMENTWISE_OP(Remainder)
 // Create the definition of FloorDivide
 DEFINE_CUDA_ELEMENTWISE_OP(FloorDivide)
 
 template <typename T, typename Context>

@@ -118,10 +118,10 @@ PD_REGISTER_KERNEL(minimum_raw,
                    int64_t,
                    float16,
                    bfloat16) {}
-PD_REGISTER_KERNEL(modulo_raw,
+PD_REGISTER_KERNEL(remainder_raw,
                    KPS,
                    ALL_LAYOUT,
-                   phi::ModuloRawKernel,
+                   phi::RemainderRawKernel,
                    float,
                    double,
                    int,
paddle/phi/ops/compat/elementwise_sig.cc

@@ -86,9 +86,9 @@ KernelSignature ElementwiseModOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
   if (axis == -1) {
-    return KernelSignature("modulo", {"X", "Y"}, {}, {"Out"});
+    return KernelSignature("remainder", {"X", "Y"}, {}, {"Out"});
   }
-  return KernelSignature("modulo_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  return KernelSignature("remainder_raw", {"X", "Y"}, {"axis"}, {"Out"});
 }
 
 KernelSignature ElementwiseFloorDivOpArgumentMapping(

@@ -247,7 +247,7 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_max, maximum);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_min, minimum);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, modulo);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, remainder);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_floordiv, floor_divide);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
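ElementwiseModOpArgumentMapping above picks the plain or the _raw kernel signature from the legacy axis attribute. A hypothetical Python sketch of that decision (illustrative names, not Paddle APIs):

def map_elementwise_mod_signature(axis):
    # Mirrors the C++ mapping: axis == -1 selects 'remainder',
    # any explicit axis falls back to 'remainder_raw' with the attribute forwarded.
    if axis == -1:
        return ("remainder", ("X", "Y"), (), ("Out",))
    return ("remainder_raw", ("X", "Y"), ("axis",), ("Out",))

assert map_elementwise_mod_signature(-1)[0] == "remainder"
assert map_elementwise_mod_signature(0)[0] == "remainder_raw"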
python/paddle/fluid/dygraph/math_op_patch.py

@@ -395,7 +395,7 @@ def monkey_patch_math_varbase():
         if framework._in_eager_mode_ else
         ('__floordiv__',
          _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
-        ('__mod__', _binary_creator_('__mod__', 'modulo', False, None, True))
+        ('__mod__', _binary_creator_('__mod__', 'remainder', False, None, True))
         if framework._in_eager_mode_ else
         ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, None)),
python/paddle/fluid/layers/nn.py

@@ -205,7 +205,7 @@ OP_NAMEMAPPING = {
     'elementwise_sub': 'subtract',
     'elementwise_mul': 'multiply',
     'elementwise_div': 'divide',
-    'elementwise_mod': 'modulo',
+    'elementwise_mod': 'remainder',
 }
python/paddle/tensor/math.py

@@ -419,12 +419,11 @@ OP_NAMEMAPPING = {
     'elementwise_min': 'minimum',
     'elementwise_pow': 'elementwise_pow',
     'elementwise_floordiv': 'floor_divide',
-    'elementwise_mod': 'modulo',
     'elementwise_add': 'add',
     'elementwise_sub': 'subtract',
     'elementwise_mul': 'multiply',
     'elementwise_div': 'divide',
-    'elementwise_mod': 'modulo',
+    'elementwise_mod': 'remainder',
 }
 
 @dygraph_only