Unverified commit ad96fe2c, authored by Chen Weihang, committed by GitHub

rename mod c api name (#45476)

Parent: 66c8ada0
@@ -1798,15 +1798,6 @@
     func : mode
   backward : mode_grad
 
-- api : modulo
-  args : (Tensor x, Tensor y)
-  output : Tensor
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : modulo
-  backward : modulo_grad
-
 - api : momentum_
   args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision = false, float rescale_grad = 1.0f)
   output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
@@ -2128,6 +2119,15 @@
     func : relu6
   backward : relu6_grad
 
+- api : remainder
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : ElementwiseInferMeta
+  kernel :
+    func : remainder
+  inplace : (x -> out)
+
 - api : renorm
   args : (Tensor x, float p, int axis, float max_norm)
   output : Tensor
...
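Note: the entry moves because this yaml file lists APIs alphabetically; the old name sorted between mode and momentum_, while remainder sorts between relu6 and renorm. The renamed entry also drops the backward : modulo_grad reference and gains an inplace : (x -> out) variant. As a rough sketch, an entry of this shape points the code generator at C++ declarations like the following; the exact signatures and the remainder_ name are assumptions, not shown in this diff:

    // Hypothetical generated declarations (sketch only, names assumed):
    Tensor remainder(const Tensor& x, const Tensor& y);
    Tensor& remainder_(Tensor& x, const Tensor& y);  // from "inplace : (x -> out)"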
@@ -1576,17 +1576,6 @@
   kernel :
     func : mode_grad
 
-- backward_api : modulo_grad
-  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : GeneralBinaryGradInferMeta
-    param : [x, y]
-  kernel :
-    func : modulo_grad
-  no_need_buffer : x, y
-
 - backward_api : multi_dot_grad
   forward : multi_dot (Tensor[] x) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad)
...
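Note: the modulo_grad spec is deleted without a renamed replacement in this hunk, so gradients for the renamed API presumably still route through the legacy elementwise_mod_grad kernel, which this commit does not touch. For reference, the textbook gradient of the Python-style remainder these kernels compute (standard calculus, not taken from this commit): writing the forward as out = x - floor(x/y) * y and treating the floor term as locally constant,

    \frac{\partial\,\text{out}}{\partial x} = 1,
    \qquad
    \frac{\partial\,\text{out}}{\partial y} = -\left\lfloor \frac{x}{y} \right\rfloor
    \quad \text{(away from the jumps of the floor).}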
@@ -46,21 +46,21 @@ void MinimumRawKernel(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-void ModuloRawKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
-                     int axis,
-                     DenseTensor* out) {
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out) {
   // allocate memory for out
   dev_ctx.template Alloc<T>(out);
   auto x_dims = x.dims();
   auto y_dims = y.dims();
   if (x_dims.size() >= y_dims.size()) {
-    funcs::ElementwiseCompute<funcs::ModuloFunctor<T>, T>(
-        dev_ctx, x, y, axis, funcs::ModuloFunctor<T>(), out);
+    funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::RemainderFunctor<T>(), out);
   } else {
-    funcs::ElementwiseCompute<funcs::InverseModuloFunctor<T>, T>(
-        dev_ctx, x, y, axis, funcs::InverseModuloFunctor<T>(), out);
+    funcs::ElementwiseCompute<funcs::InverseRemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::InverseRemainderFunctor<T>(), out);
   }
 }
@@ -139,10 +139,10 @@ PD_REGISTER_KERNEL(minimum_raw,
                    int,
                    int64_t,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(modulo_raw,
+PD_REGISTER_KERNEL(remainder_raw,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ModuloRawKernel,
+                   phi::RemainderRawKernel,
                    float,
                    double,
                    int,
...
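Note on the x_dims.size() >= y_dims.size() branch: ElementwiseCompute broadcasts with the higher-rank operand in the first slot, so when y has more dimensions the operands reach the functor swapped, and the Inverse* functor compensates by computing b % a. A standalone sketch of that pairing on plain scalars, with the tensor plumbing omitted (the sign-fix line mirrors the InverseRemainderFunctor body visible later in this diff):

    #include <cstdio>

    // Used when the first operand is the higher-rank one: a rem b.
    template <typename T>
    struct RemainderFunctor {
      T operator()(T a, T b) const {
        T res = a % b;
        if ((res != 0) && ((res < 0) != (b < 0))) res += b;  // take b's sign
        return res;
      }
    };

    // Operands arrive swapped, so compute b rem a to keep the semantics.
    template <typename T>
    struct InverseRemainderFunctor {
      T operator()(T a, T b) const {
        T res = b % a;
        if ((res != 0) && ((res < 0) != (a < 0))) res += a;
        return res;
      }
    };

    int main() {
      // Both calls evaluate -7 rem 3 = 2 (Python-style), whichever order
      // the broadcast machinery hands the operands over in.
      std::printf("%d\n", RemainderFunctor<int>()(-7, 3));         // 2
      std::printf("%d\n", InverseRemainderFunctor<int>()(3, -7));  // 2
    }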
@@ -38,12 +38,12 @@ void MinimumKernel(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-void ModuloKernel(const Context& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* out) {
+void RemainderKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out) {
   int axis = -1;
-  ModuloRawKernel<T>(dev_ctx, x, y, axis, out);
+  RemainderRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
@@ -96,8 +96,14 @@ PD_REGISTER_KERNEL(minimum,
                    int,
                    int64_t,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(
-    modulo, CPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
+PD_REGISTER_KERNEL(remainder,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::RemainderKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
 PD_REGISTER_KERNEL(
     floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
 PD_REGISTER_KERNEL(elementwise_heaviside,
@@ -139,8 +145,14 @@ PD_REGISTER_KERNEL(minimum,
                    int64_t,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
-PD_REGISTER_KERNEL(
-    modulo, GPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
+PD_REGISTER_KERNEL(remainder,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::RemainderKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
 PD_REGISTER_KERNEL(
     floor_divide, KPS, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
 PD_REGISTER_KERNEL(elementwise_heaviside,
...
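Note: only the registered names change here; PD_REGISTER_KERNEL still instantiates the same templated kernel once per listed dtype and files each instantiation under the (name, backend, layout, dtype) key. A toy model of that per-dtype fan-out, assuming nothing about Paddle's real macro machinery:

    #include <functional>
    #include <map>
    #include <string>
    #include <utility>

    // Registry keyed by (kernel name, dtype); the real one also keys on
    // backend and layout.
    static std::map<std::pair<std::string, std::string>, std::function<void()>>
        g_registry;

    template <typename T>
    void RemainderKernelStub() { /* would launch the T-typed kernel */ }

    template <typename T>
    void RegisterOne(const std::string& name, const std::string& dtype) {
      g_registry[{name, dtype}] = &RemainderKernelStub<T>;
    }

    int main() {
      // Roughly what the variadic macro expands to: one entry per dtype.
      RegisterOne<float>("remainder", "float32");
      RegisterOne<double>("remainder", "float64");
      RegisterOne<int>("remainder", "int32");
      RegisterOne<long long>("remainder", "int64");
    }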
@@ -60,18 +60,18 @@ void MinimumKernel(const Context& dev_ctx,
                    DenseTensor* out);
 
 template <typename T, typename Context>
-void ModuloRawKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
-                     int axis,
-                     DenseTensor* out);
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out);
 
 template <typename T, typename Context>
-void ModuloKernel(const Context& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* out);
+void RemainderKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out);
 
 template <typename T, typename Context>
 void FloorDivideRawKernel(const Context& dev_ctx,
                           const DenseTensor& x,
@@ -134,13 +134,13 @@ DenseTensor Minimum(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-DenseTensor Modulo(const Context& dev_ctx,
-                   const DenseTensor& x,
-                   const DenseTensor& y) {
+DenseTensor Remainder(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
   ElementwiseInferMeta(x, y, &meta_out);
-  ModuloKernel<T, Context>(dev_ctx, x, y, &dense_out);
+  RemainderKernel<T, Context>(dev_ctx, x, y, &dense_out);
   return dense_out;
 }
...
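Note: the header also renames the allocate-and-return convenience wrapper from phi::Modulo to phi::Remainder; it infers the output shape and dtype via ElementwiseInferMeta and then calls the kernel. A hedged usage sketch for C++ callers, assuming a Paddle build where the declarations above are visible (dev_ctx, x, and y come from the caller):

    // Formerly phi::Modulo<float>(dev_ctx, x, y); only the name changed.
    template <typename Context>
    phi::DenseTensor FloatRemainder(const Context& dev_ctx,
                                    const phi::DenseTensor& x,
                                    const phi::DenseTensor& y) {
      return phi::Remainder<float, Context>(dev_ctx, x, y);
    }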
@@ -21,7 +21,7 @@ limitations under the License. */
 
 #if defined(__xpu__)
 #include <xpu/runtime.h>
-#include "xpu/kernel/math_xpu2.h"  //pow()
+#include "xpu/kernel/math_xpu2.h"  // pow()
 #endif
 
 namespace phi {
@@ -499,7 +499,7 @@ struct MinGradXYFunctor {
 
 // Modulo
 template <typename T, typename Enable = void>
-struct ModuloFunctor {
+struct RemainderFunctor {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
     T res = a % b;
@@ -511,7 +511,7 @@ struct ModuloFunctor {
 };
 
 template <typename T>
-struct ModuloFunctor<
+struct RemainderFunctor<
     T,
     typename std::enable_if_t<std::is_floating_point<T>::value>> {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
@@ -525,7 +525,7 @@ struct ModuloFunctor<
 };
 
 template <typename T, typename Enable = void>
-struct InverseModuloFunctor {
+struct InverseRemainderFunctor {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
     T res = b % a;
     if ((res != 0) && ((res < 0) != (a < 0))) res += a;
@@ -534,7 +534,7 @@ struct InverseModuloFunctor {
 };
 
 template <typename T>
-struct InverseModuloFunctor<
+struct InverseRemainderFunctor<
     T,
     typename std::enable_if_t<std::is_floating_point<T>::value>> {
   inline HOSTDEVICE T operator()(const T a, const T b) const {
...
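Note: these functors implement Python-style remainder (the result takes the divisor's sign) rather than C's truncating % and fmod, with the integral/floating-point split handled by the enable_if partial specializations seen above. A standalone demo of both branches; the integer adjustment mirrors the InverseRemainderFunctor body visible in this diff, while the floating-point body is elided above, so the fmod-plus-adjustment form below is an assumption:

    #include <cmath>
    #include <cstdio>
    #include <type_traits>

    template <typename T,
              std::enable_if_t<std::is_integral<T>::value, int> = 0>
    T PyRemainder(T a, T b) {
      T res = a % b;  // C-style: keeps a's sign
      if ((res != 0) && ((res < 0) != (b < 0))) res += b;
      return res;
    }

    template <typename T,
              std::enable_if_t<std::is_floating_point<T>::value, int> = 0>
    T PyRemainder(T a, T b) {
      T res = std::fmod(a, b);  // likewise keeps a's sign
      if ((res != 0) && ((res < 0) != (b < 0))) res += b;
      return res;
    }

    int main() {
      std::printf("%d\n", PyRemainder(-7, 3));      // 2   (C's -7 % 3 is -1)
      std::printf("%d\n", PyRemainder(7, -3));      // -2
      std::printf("%f\n", PyRemainder(-7.5, 2.0));  // 0.5 (fmod gives -1.5)
    }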
@@ -42,8 +42,8 @@ void MinimumKernel(const Context& dev_ctx,
   int axis = -1;
   MinimumRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
-// Create the definition of Modulo
-DEFINE_CUDA_ELEMENTWISE_OP(Modulo)
+// Create the definition of Remainder
+DEFINE_CUDA_ELEMENTWISE_OP(Remainder)
 // Create the definition of FloorDivide
 DEFINE_CUDA_ELEMENTWISE_OP(FloorDivide)
 
 template <typename T, typename Context>
@@ -118,10 +118,10 @@ PD_REGISTER_KERNEL(minimum_raw,
                    int64_t,
                    float16,
                    bfloat16) {}
-PD_REGISTER_KERNEL(modulo_raw,
+PD_REGISTER_KERNEL(remainder_raw,
                    KPS,
                    ALL_LAYOUT,
-                   phi::ModuloRawKernel,
+                   phi::RemainderRawKernel,
                    float,
                    double,
                    int,
...
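Note: DEFINE_CUDA_ELEMENTWISE_OP pastes its argument into both the kernel and functor names, which is why the functor rename in the header and this macro argument must change in lockstep. An illustrative guess at the macro's shape; the real definition lives elsewhere in the tree and may differ in detail:

    // Assumed expansion pattern (the token pasting is the point being shown):
    #define DEFINE_CUDA_ELEMENTWISE_OP(name)                        \
      template <typename T, typename Context>                       \
      void name##RawKernel(const Context& dev_ctx,                  \
                           const DenseTensor& x,                    \
                           const DenseTensor& y,                    \
                           int axis,                                \
                           DenseTensor* out) {                      \
        dev_ctx.template Alloc<T>(out);                             \
        /* broadcast x and y, apply funcs::name##Functor<T> */      \
      }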
@@ -86,9 +86,9 @@ KernelSignature ElementwiseModOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
   if (axis == -1) {
-    return KernelSignature("modulo", {"X", "Y"}, {}, {"Out"});
+    return KernelSignature("remainder", {"X", "Y"}, {}, {"Out"});
   }
-  return KernelSignature("modulo_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  return KernelSignature("remainder_raw", {"X", "Y"}, {"axis"}, {"Out"});
 }
 
 KernelSignature ElementwiseFloorDivOpArgumentMapping(
@@ -247,7 +247,7 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_max, maximum);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_min, minimum);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, modulo);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, remainder);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_floordiv, floor_divide);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
...
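Note: these two pieces retarget the legacy elementwise_mod operator at the renamed phi kernels; the base-name mapping rewrites the op name, and the argument mapping picks the plain kernel when the op's axis attribute is the default -1, or the *_raw variant otherwise. The routing decision in isolation:

    #include <string>

    // Mirrors ElementwiseModOpArgumentMapping's branch; KernelSignature is
    // reduced to a bare kernel name for illustration.
    std::string PickRemainderKernel(int axis) {
      return axis == -1 ? std::string("remainder")
                        : std::string("remainder_raw");
    }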
@@ -395,7 +395,7 @@ def monkey_patch_math_varbase():
         if framework._in_eager_mode_ else
         ('__floordiv__',
          _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
-        ('__mod__', _binary_creator_('__mod__', 'modulo', False, None, True))
+        ('__mod__', _binary_creator_('__mod__', 'remainder', False, None, True))
         if framework._in_eager_mode_ else
         ('__mod__',
          _binary_creator_('__mod__', 'elementwise_mod', False, None)),
...
@@ -205,7 +205,7 @@ OP_NAMEMAPPING = {
     'elementwise_sub': 'subtract',
     'elementwise_mul': 'multiply',
     'elementwise_div': 'divide',
-    'elementwise_mod': 'modulo',
+    'elementwise_mod': 'remainder',
 }
...
@@ -419,12 +419,11 @@ OP_NAMEMAPPING = {
     'elementwise_min': 'minimum',
     'elementwise_pow': 'elementwise_pow',
     'elementwise_floordiv': 'floor_divide',
-    'elementwise_mod': 'modulo',
     'elementwise_add': 'add',
     'elementwise_sub': 'subtract',
     'elementwise_mul': 'multiply',
     'elementwise_div': 'divide',
-    'elementwise_mod': 'modulo',
+    'elementwise_mod': 'remainder',
 }
 
 @dygraph_only
...