Unverified commit ad96fe2c, authored by Chen Weihang, committed by GitHub

rename mod C API name (#45476)

Parent 66c8ada0
@@ -1798,15 +1798,6 @@
func : mode
backward : mode_grad
- api : modulo
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : ElementwiseInferMeta
kernel :
func : modulo
backward : modulo_grad
- api : momentum_
args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision = false, float rescale_grad = 1.0f)
output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
@@ -2128,6 +2119,15 @@
func : relu6
backward : relu6_grad
- api : remainder
args : (Tensor x, Tensor y)
output : Tensor
infer_meta :
func : ElementwiseInferMeta
kernel :
func : remainder
inplace : (x -> out)
- api : renorm
args : (Tensor x, float p, int axis, float max_norm)
output : Tensor
......
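The two YAML hunks above edit the op registry that Paddle's API code generator consumes: the `modulo` entry is dropped from the alphabetical `m` block and an equivalent `remainder` entry is added in the `r` block, now also declaring `inplace : (x -> out)`. Both entries point at `ElementwiseInferMeta`, which, as with Paddle's other binary elementwise ops, derives the output shape by numpy-style broadcasting. A minimal standalone sketch of that rule (a hypothetical stand-in, not Paddle's actual implementation):

```cpp
#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Numpy-style broadcast rule: align shapes from the right; each dim pair
// must either match or contain a 1.
std::vector<int64_t> BroadcastShape(const std::vector<int64_t>& x,
                                    const std::vector<int64_t>& y) {
  const size_t n = std::max(x.size(), y.size());
  std::vector<int64_t> out(n);
  for (size_t i = 0; i < n; ++i) {
    // Pad the shorter shape with leading 1s.
    const int64_t dx = i < n - x.size() ? 1 : x[i - (n - x.size())];
    const int64_t dy = i < n - y.size() ? 1 : y[i - (n - y.size())];
    if (dx != dy && dx != 1 && dy != 1)
      throw std::invalid_argument("shapes are not broadcastable");
    out[i] = std::max(dx, dy);
  }
  return out;
}
```

For example, `BroadcastShape({4, 3}, {2, 1, 3})` yields `{2, 4, 3}`.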
@@ -1576,17 +1576,6 @@
kernel :
func : mode_grad
- backward_api : modulo_grad
forward : modulo (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : modulo_grad
no_need_buffer : x, y
- backward_api : multi_dot_grad
forward : multi_dot (Tensor[] x) -> Tensor(out)
args : (Tensor[] x, Tensor out_grad)
......
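For reference, the gradients behind a mod-style binary op follow from writing the forward pass as a difference with a floor term. This is the textbook derivation, not a quote of the removed `modulo_grad` kernel; note the entry marks both `x` and `y` as `no_need_buffer`, so that kernel evidently never reads the input values and may realize a simpler convention:

```latex
r = x - y \left\lfloor \frac{x}{y} \right\rfloor
\quad\Longrightarrow\quad
\frac{\partial r}{\partial x} = 1,
\qquad
\frac{\partial r}{\partial y} = -\left\lfloor \frac{x}{y} \right\rfloor
\quad \text{(treating } \lfloor x/y \rfloor \text{ as locally constant)}
```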
@@ -46,21 +46,21 @@ void MinimumRawKernel(const Context& dev_ctx,
}
template <typename T, typename Context>
void ModuloRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
void RemainderRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
auto y_dims = y.dims();
if (x_dims.size() >= y_dims.size()) {
funcs::ElementwiseCompute<funcs::ModuloFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::ModuloFunctor<T>(), out);
funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::RemainderFunctor<T>(), out);
} else {
funcs::ElementwiseCompute<funcs::InverseModuloFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::InverseModuloFunctor<T>(), out);
funcs::ElementwiseCompute<funcs::InverseRemainderFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::InverseRemainderFunctor<T>(), out);
}
}
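In `RemainderRawKernel` above, the rank comparison decides the broadcast direction: when `y` outranks `x`, `ElementwiseCompute` walks the higher-rank operand and hands the functor its arguments in swapped order, which is why `InverseRemainderFunctor` (the same operation with operands exchanged, defined later in this diff) is substituted. A standalone toy sketch of that convention, using subtraction as a stand-in so the operand order stays visible:

```cpp
#include <cassert>

// Stand-ins for funcs::RemainderFunctor / funcs::InverseRemainderFunctor.
template <typename T>
struct SubFunctor {
  T operator()(T a, T b) const { return a - b; }
};
template <typename T>
struct InverseSubFunctor {
  T operator()(T a, T b) const { return b - a; }  // operands arrive swapped
};

int main() {
  // Intended computation: x - y with x = 10, y = 4. On the swapped
  // broadcast path the functor is invoked as f(y_elem, x_elem):
  assert(InverseSubFunctor<int>{}(4, 10) == 10 - 4);
}
```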
@@ -139,10 +139,10 @@ PD_REGISTER_KERNEL(minimum_raw,
int,
int64_t,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(modulo_raw,
PD_REGISTER_KERNEL(remainder_raw,
CPU,
ALL_LAYOUT,
phi::ModuloRawKernel,
phi::RemainderRawKernel,
float,
double,
int,
......
@@ -38,12 +38,12 @@ void MinimumKernel(const Context& dev_ctx,
}
template <typename T, typename Context>
void ModuloKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
void RemainderKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
int axis = -1;
ModuloRawKernel<T>(dev_ctx, x, y, axis, out);
RemainderRawKernel<T>(dev_ctx, x, y, axis, out);
}
template <typename T, typename Context>
@@ -96,8 +96,14 @@ PD_REGISTER_KERNEL(minimum,
int,
int64_t,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(
modulo, CPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(remainder,
CPU,
ALL_LAYOUT,
phi::RemainderKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside,
@@ -139,8 +145,14 @@ PD_REGISTER_KERNEL(minimum,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(
modulo, GPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(remainder,
GPU,
ALL_LAYOUT,
phi::RemainderKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, KPS, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside,
......
@@ -60,18 +60,18 @@ void MinimumKernel(const Context& dev_ctx,
DenseTensor* out);
template <typename T, typename Context>
void ModuloRawKernel(const Context& dev_ctx,
void RemainderRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out);
template <typename T, typename Context>
void ModuloKernel(const Context& dev_ctx,
void RemainderKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out);
template <typename T, typename Context>
void FloorDivideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
@@ -134,13 +134,13 @@ DenseTensor Minimum(const Context& dev_ctx,
}
template <typename T, typename Context>
DenseTensor Modulo(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
DenseTensor Remainder(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
DenseTensor dense_out;
MetaTensor meta_out(&dense_out);
ElementwiseInferMeta(x, y, &meta_out);
ModuloKernel<T, Context>(dev_ctx, x, y, &dense_out);
RemainderKernel<T, Context>(dev_ctx, x, y, &dense_out);
return dense_out;
}
......
@@ -21,7 +21,7 @@ limitations under the License. */
#if defined(__xpu__)
#include <xpu/runtime.h>
#include "xpu/kernel/math_xpu2.h" //pow()
#include "xpu/kernel/math_xpu2.h" // pow()
#endif
namespace phi {
@@ -499,7 +499,7 @@ struct MinGradXYFunctor {
// Modulo
template <typename T, typename Enable = void>
struct ModuloFunctor {
struct RemainderFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
T res = a % b;
@@ -511,7 +511,7 @@ struct ModuloFunctor {
};
template <typename T>
struct ModuloFunctor<
struct RemainderFunctor<
T,
typename std::enable_if_t<std::is_floating_point<T>::value>> {
inline HOSTDEVICE T operator()(const T a, const T b) const {
@@ -525,7 +525,7 @@ struct ModuloFunctor<
};
template <typename T, typename Enable = void>
struct InverseModuloFunctor {
struct InverseRemainderFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
T res = b % a;
if ((res != 0) && ((res < 0) != (a < 0))) res += a;
@@ -534,7 +534,7 @@ struct InverseModuloFunctor {
};
template <typename T>
struct InverseModuloFunctor<
struct InverseRemainderFunctor<
T,
typename std::enable_if_t<std::is_floating_point<T>::value>> {
inline HOSTDEVICE T operator()(const T a, const T b) const {
......
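The renamed functors are where the op's Python-style semantics live: after the raw `%`, the result is shifted by the divisor whenever it is nonzero and its sign disagrees with the divisor's (the floating-point specializations, whose bodies are elided in the hunks above, apply the analogous correction for floats). A self-contained demo of how this differs from C's truncating `%`:

```cpp
#include <cstdio>

// Same sign-fix as RemainderFunctor<T> for integral T in the hunk above.
int PyRemainder(int a, int b) {
  int res = a % b;                                 // C '%' truncates toward 0
  if (res != 0 && (res < 0) != (b < 0)) res += b;  // result takes b's sign
  return res;
}

int main() {
  std::printf("-7 %% 3 : C -> %d, remainder -> %d\n", -7 % 3, PyRemainder(-7, 3));
  std::printf(" 7 %% -3: C -> %d, remainder -> %d\n", 7 % -3, PyRemainder(7, -3));
  // Prints: -7 % 3 : C -> -1, remainder -> 2
  //          7 % -3: C ->  1, remainder -> -2
}
```

This divisor-sign convention matches Python's `%`, and the rename to `remainder` lines the kernel name up with the `paddle.remainder` API these kernels back.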
@@ -42,8 +42,8 @@ void MinimumKernel(const Context& dev_ctx,
int axis = -1;
MinimumRawKernel<T>(dev_ctx, x, y, axis, out);
}
// Create the definition of Modulo
DEFINE_CUDA_ELEMENTWISE_OP(Modulo)
// Create the definition of Remainder
DEFINE_CUDA_ELEMENTWISE_OP(Remainder)
// Create the definition of FloorDivide
DEFINE_CUDA_ELEMENTWISE_OP(FloorDivide)
template <typename T, typename Context>
@@ -118,10 +118,10 @@ PD_REGISTER_KERNEL(minimum_raw,
int64_t,
float16,
bfloat16) {}
PD_REGISTER_KERNEL(modulo_raw,
PD_REGISTER_KERNEL(remainder_raw,
KPS,
ALL_LAYOUT,
phi::ModuloRawKernel,
phi::RemainderRawKernel,
float,
double,
int,
......
@@ -86,9 +86,9 @@ KernelSignature ElementwiseModOpArgumentMapping(
const ArgumentMappingContext& ctx) {
int axis = paddle::any_cast<int>(ctx.Attr("axis"));
if (axis == -1) {
return KernelSignature("modulo", {"X", "Y"}, {}, {"Out"});
return KernelSignature("remainder", {"X", "Y"}, {}, {"Out"});
}
return KernelSignature("modulo_raw", {"X", "Y"}, {"axis"}, {"Out"});
return KernelSignature("remainder_raw", {"X", "Y"}, {"axis"}, {"Out"});
}
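`ElementwiseModOpArgumentMapping` above is the compat shim from the legacy `elementwise_mod` operator to the renamed phi kernels: the default broadcast axis (-1) selects the plain `remainder` signature, while any explicit axis selects `remainder_raw`. Condensed into a standalone sketch:

```cpp
#include <cassert>
#include <string>

// Hypothetical condensed form of the mapping's branch on the legacy
// "axis" attribute.
std::string ElementwiseModKernelName(int axis) {
  return axis == -1 ? "remainder" : "remainder_raw";
}

int main() {
  assert(ElementwiseModKernelName(-1) == "remainder");
  assert(ElementwiseModKernelName(2) == "remainder_raw");
}
```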
KernelSignature ElementwiseFloorDivOpArgumentMapping(
@@ -247,7 +247,7 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul, multiply);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_div, divide);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_max, maximum);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_min, minimum);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, modulo);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, remainder);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_floordiv, floor_divide);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
......
@@ -395,7 +395,7 @@ def monkey_patch_math_varbase():
if framework._in_eager_mode_ else
('__floordiv__',
_binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
('__mod__', _binary_creator_('__mod__', 'modulo', False, None, True))
('__mod__', _binary_creator_('__mod__', 'remainder', False, None, True))
if framework._in_eager_mode_ else
('__mod__',
_binary_creator_('__mod__', 'elementwise_mod', False, None)),
......
@@ -205,7 +205,7 @@ OP_NAMEMAPPING = {
'elementwise_sub': 'subtract',
'elementwise_mul': 'multiply',
'elementwise_div': 'divide',
'elementwise_mod': 'modulo',
'elementwise_mod': 'remainder',
}
......
@@ -419,12 +419,11 @@ OP_NAMEMAPPING = {
'elementwise_min': 'minimum',
'elementwise_pow': 'elementwise_pow',
'elementwise_floordiv': 'floor_divide',
'elementwise_mod': 'modulo',
'elementwise_add': 'add',
'elementwise_sub': 'subtract',
'elementwise_mul': 'multiply',
'elementwise_div': 'divide',
'elementwise_mod': 'modulo',
'elementwise_mod': 'remainder',
}
@dygraph_only
......