Unverified commit 8129c22e, authored by zyfncg, committed by GitHub

Rename elementwise_heaviside to heaviside (#50821)

* rename elementwise_heaviside to heaviside

* delete __init__.py

* fix bug
Parent 5041158f
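
Note: the rename is internal to the phi kernel layer; the public `paddle.heaviside` API keeps its name and semantics (0 where x < 0, y where x == 0, 1 where x > 0). A minimal usage sketch of the API these kernels back, with expected values following those documented semantics:

```python
import paddle

x = paddle.to_tensor([-0.5, 0.0, 0.5])
y = paddle.to_tensor([0.1])  # broadcast against x
# In dygraph mode this now dispatches to the renamed `heaviside` kernel.
print(paddle.heaviside(x, y))  # expected: [0.0, 0.1, 1.0]
```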
@@ -413,16 +413,6 @@
   kernel :
     func : einsum_grad
 
-- backward_op : elementwise_heaviside_grad
-  forward : heaviside (Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : GeneralBinaryGradInferMeta
-    param : [x, y]
-  kernel :
-    func : elementwise_heaviside_grad
-
 - backward_op : elementwise_pow_grad
   forward : elementwise_pow(Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
@@ -570,6 +560,16 @@
     func : hardswish_grad
   inplace : (out_grad -> x_grad)
 
+- backward_op : heaviside_grad
+  forward : heaviside (Tensor x, Tensor y) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, y]
+  kernel :
+    func : heaviside_grad
+
 - backward_op : hsigmoid_loss_grad
   forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
   args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool remote_prefetch, bool is_sparse)
......
@@ -532,15 +532,6 @@
     func : einsum
   backward : einsum_grad
 
-- op : elementwise_heaviside
-  args : (Tensor x, Tensor y)
-  output : Tensor
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : elementwise_heaviside
-  backward : elementwise_heaviside_grad
-
 - op : elementwise_pow
   args : (Tensor x, Tensor y)
   output : Tensor(out)
@@ -827,6 +818,15 @@
     func : hardswish
   backward : hardswish_grad
 
+- op : heaviside
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : ElementwiseInferMeta
+  kernel :
+    func : heaviside
+  backward : heaviside_grad
+
 - op : hsigmoid_loss
   args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool remote_prefetch, bool is_sparse)
   output : Tensor(out), Tensor(pre_out), Tensor(w_out)
......
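
The new `heaviside` entry in the ops YAML is what the code generator exposes through `paddle._C_ops`, matching the dygraph branch updated in the Python diff at the end of this commit. A hedged sketch of the resulting call path (assumes a Paddle build generated from these YAML entries):

```python
import paddle
from paddle import _C_ops

x = paddle.to_tensor([1.0, -2.0, 0.0])
y = paddle.to_tensor([0.5, 0.5, 0.5])
# Same call the updated paddle.heaviside makes in dygraph mode.
out = _C_ops.heaviside(x, y)
print(out)  # expected: [1.0, 0.0, 0.5]
```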
@@ -89,10 +89,10 @@ PD_REGISTER_KERNEL(minimum_grad,
                    int64_t,
                    phi::dtype::bfloat16) {}
 
-PD_REGISTER_KERNEL(elementwise_heaviside_grad,
+PD_REGISTER_KERNEL(heaviside_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ElementwiseHeavisideGradKernel,
+                   phi::HeavisideGradKernel,
                    float,
                    double,
                    int,
......
@@ -103,15 +103,14 @@ void ElementwisePowRawKernel(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
+void HeavisideKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& y,
-                     int axis,
                      DenseTensor* out) {
   // allocate memory for out
   dev_ctx.template Alloc<T>(out);
   funcs::ElementwiseCompute<funcs::ElementwiseHeavisideFunctor<T>, T>(
-      dev_ctx, x, y, axis, funcs::ElementwiseHeavisideFunctor<T>(), out);
+      dev_ctx, x, y, -1, funcs::ElementwiseHeavisideFunctor<T>(), out);
 }
 
 }  // namespace phi
@@ -174,10 +173,11 @@ PD_REGISTER_KERNEL(elementwise_pow_raw,
                    double,
                    int,
                    int64_t) {}
-PD_REGISTER_KERNEL(elementwise_heaviside_raw,
+PD_REGISTER_KERNEL(heaviside,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ElementwiseHeavisideRawKernel,
+                   phi::HeavisideKernel,
                    float,
                    double,
                    int,
......
@@ -55,11 +55,10 @@ void MinimumGradKernel(const Context& dev_ctx,
                        DenseTensor* dy);
 
 template <typename T, typename Context>
-void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
+void HeavisideGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
                          const DenseTensor& y,
                          const DenseTensor& dout,
-                         int axis,
                          DenseTensor* dx,
                          DenseTensor* dy);
......
@@ -68,15 +68,6 @@ void ElementwisePowKernel(const Context& dev_ctx,
   ElementwisePowRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
-template <typename T, typename Context>
-void ElementwiseHeavisideKernel(const Context& dev_ctx,
-                                const DenseTensor& x,
-                                const DenseTensor& y,
-                                DenseTensor* out) {
-  int axis = -1;
-  ElementwiseHeavisideRawKernel<T>(dev_ctx, x, y, axis, out);
-}
-
 template <typename T, typename Context>
 void DivideKernel(const Context& dev_ctx,
                   const DenseTensor& x,
@@ -153,14 +144,6 @@ PD_REGISTER_KERNEL(remainder,
                    int64_t) {}
 PD_REGISTER_KERNEL(
     floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
-PD_REGISTER_KERNEL(elementwise_heaviside,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseHeavisideKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t) {}
 PD_REGISTER_KERNEL(elementwise_pow,
                    CPU,
                    ALL_LAYOUT,
@@ -262,15 +245,6 @@ PD_REGISTER_KERNEL(remainder,
                    phi::dtype::float16) {}
 PD_REGISTER_KERNEL(
     floor_divide, KPS, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
-PD_REGISTER_KERNEL(elementwise_heaviside,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::ElementwiseHeavisideKernel,
-                   float,
-                   double,
-                   int,
-                   int64_t,
-                   phi::dtype::float16) {}
 PD_REGISTER_KERNEL(elementwise_pow,
                    KPS,
                    ALL_LAYOUT,
......
@@ -104,14 +104,7 @@ void ElementwisePowKernel(const Context& dev_ctx,
                           DenseTensor* out);
 
 template <typename T, typename Context>
-void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
-                                   const DenseTensor& x,
-                                   const DenseTensor& y,
-                                   int axis,
-                                   DenseTensor* out);
-
-template <typename T, typename Context>
-void ElementwiseHeavisideKernel(const Context& dev_ctx,
+void HeavisideKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& y,
                      DenseTensor* out);
@@ -161,13 +154,13 @@ DenseTensor FloorDivide(const Context& dev_ctx,
 }
 
 template <typename T, typename Context>
-DenseTensor ElementwiseHeaviside(const Context& dev_ctx,
+DenseTensor Heaviside(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& y) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
   ElementwiseInferMeta(x, y, &meta_out);
-  ElementwiseHeavisideKernel<T, Context>(dev_ctx, x, y, &dense_out);
+  HeavisideKernel<T, Context>(dev_ctx, x, y, &dense_out);
   return dense_out;
 }
......
@@ -132,10 +132,10 @@ PD_REGISTER_KERNEL(minimum_grad,
                    phi::dtype::float16,
                    phi::dtype::bfloat16) {}
 
-PD_REGISTER_KERNEL(elementwise_heaviside_grad,
+PD_REGISTER_KERNEL(heaviside_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::ElementwiseHeavisideGradKernel,
+phi::HeavisideGradKernel,
                    float,
                    double,
                    int,
......
@@ -830,11 +830,10 @@ struct HeavisideGradDy {
 };
 
 template <typename T, typename Context>
-void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
+void HeavisideGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
                          const DenseTensor& y,
                          const DenseTensor& dout,
-                         int axis,
                          DenseTensor* dx,
                          DenseTensor* dy) {
   funcs::ElementwiseGradPreProcess(dout, dx);
@@ -845,7 +844,7 @@ void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
       y,
       dout,
       dout,
-      axis,
+      -1,
       dx,
       dy,
       HeavisideGradDx<T>(),
......
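
For reference, the math behind the `HeavisideGradDx`/`HeavisideGradDy` functors this kernel applies (the second appears at the top of the hunk above): the Heaviside output is constant in x almost everywhere, and equals y only where x == 0. A NumPy sketch of the expected gradients, written as an illustration rather than a copy of the functors:

```python
import numpy as np

def heaviside_grad(x, dout):
    """Reference gradients for out = heaviside(x, y).

    d(out)/dx is 0 almost everywhere (the step is flat away from 0),
    so dx is all zeros; d(out)/dy is 1 exactly where x == 0.
    """
    dx = np.zeros_like(dout)
    dy = dout * (x == 0)
    return dx, dy

x = np.array([-1.0, 0.0, 2.0])
dout = np.ones_like(x)
print(heaviside_grad(x, dout))  # dx = [0, 0, 0], dy = [0, 1, 0]
```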
@@ -56,7 +56,23 @@ void FloorDivideKernel(const Context& dev_ctx,
   FloorDivideRawKernel<T>(dev_ctx, x, y, axis, out);
 }
 
-// Create the definition of Heaviside
-DEFINE_CUDA_ELEMENTWISE_OP(ElementwiseHeaviside)
+template <typename T, typename Context>
+void HeavisideKernel(const Context& dev_ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, -1, funcs::ElementwiseHeavisideFunctor<T>());
+}
 
 // Create the definition of Pow
 DEFINE_CUDA_ELEMENTWISE_OP(ElementwisePow)
 template <typename T, typename Context>
@@ -148,10 +164,10 @@ PD_REGISTER_KERNEL(floor_divide_raw,
                    phi::FloorDivideRawKernel,
                    int,
                    int64_t) {}
-PD_REGISTER_KERNEL(elementwise_heaviside_raw,
+PD_REGISTER_KERNEL(heaviside,
                    KPS,
                    ALL_LAYOUT,
-                   phi::ElementwiseHeavisideRawKernel,
+                   phi::HeavisideKernel,
                    float,
                    double,
                    int,
......
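
The hunk above replaces the `DEFINE_CUDA_ELEMENTWISE_OP` macro expansion with an explicit `HeavisideKernel` that calls `funcs::BroadcastKernel` with the axis fixed at -1, i.e. ordinary trailing-dimension broadcasting. A NumPy sketch of the computation `ElementwiseHeavisideFunctor` performs after broadcasting:

```python
import numpy as np

def heaviside(x, y):
    """NumPy model of the broadcasted elementwise computation:
    0 where x < 0, 1 where x > 0, and y where x == 0."""
    x, y = np.broadcast_arrays(x, y)
    return np.where(x == 0, y, (x > 0).astype(y.dtype))

print(heaviside(np.array([[-3.0], [0.0], [4.0]]), np.array([0.25])))
# expected: [[0.0], [0.25], [1.0]]
```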
@@ -102,12 +102,7 @@ KernelSignature ElementwiseFloorDivOpArgumentMapping(
 
 KernelSignature ElementwiseHeavisideOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  int axis = paddle::any_cast<int>(ctx.Attr("axis"));
-  if (axis == -1) {
-    return KernelSignature("elementwise_heaviside", {"X", "Y"}, {}, {"Out"});
-  }
-  return KernelSignature(
-      "elementwise_heaviside_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  return KernelSignature("heaviside", {"X", "Y"}, {}, {"Out"});
 }
 
 KernelSignature ElementwisePowOpArgumentMapping(
@@ -226,10 +221,8 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
 
 KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("elementwise_heaviside_grad",
-                         {"X", "Y", "Out@GRAD"},
-                         {"axis"},
-                         {"X@GRAD", "Y@GRAD"});
+  return KernelSignature(
+      "heaviside_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
 KernelSignature ElementwisePowGradOpArgumentMapping(
@@ -249,6 +242,7 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_max, maximum);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_min, minimum);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, remainder);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_floordiv, floor_divide);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_heaviside, heaviside);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad);
@@ -265,6 +259,7 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax_grad, fmax_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin_grad, fmin_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_max_grad, maximum_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_min_grad, minimum_grad);
+PD_REGISTER_BASE_KERNEL_NAME(elementwise_heaviside_grad, heaviside_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                            phi::ElementwiseAddOpArgumentMapping);
......
@@ -4797,7 +4797,7 @@ def heaviside(x, y, name=None):
             #  [0.        , 1.        , 0.30000001]]
     """
     if in_dygraph_mode():
-        return _C_ops.elementwise_heaviside(x, y)
+        return _C_ops.heaviside(x, y)
     else:
         op_type = 'elementwise_heaviside'
         return _elementwise_op(LayerHelper(op_type, **locals()))
......
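
Note that the static-graph branch above still builds the legacy `elementwise_heaviside` op; the two `PD_REGISTER_BASE_KERNEL_NAME` lines added in the signature-mapping file are what redirect that op and its grad to the renamed phi kernels. A small sketch checking that both execution modes stay consistent (assumes a Paddle build containing this commit):

```python
import numpy as np
import paddle

x_np = np.array([-1.0, 0.0, 3.0]).astype("float32")
y_np = np.array([0.5, 0.5, 0.5]).astype("float32")

# Dygraph: goes through _C_ops.heaviside.
eager = paddle.heaviside(paddle.to_tensor(x_np), paddle.to_tensor(y_np)).numpy()

# Static graph: still builds `elementwise_heaviside`, remapped to `heaviside`.
paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data("x", shape=[3], dtype="float32")
    y = paddle.static.data("y", shape=[3], dtype="float32")
    out = paddle.heaviside(x, y)
exe = paddle.static.Executor()
(static_out,) = exe.run(main, feed={"x": x_np, "y": y_np}, fetch_list=[out])

assert np.allclose(eager, static_out)  # both paths hit the same phi kernel
```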