Unverified commit eb93b5c9, authored by zhangyuqin1998, committed by GitHub

rename_bilinear_tensor_op (#52745)

Parent 5ab07e04
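For orientation: this commit renames the C++ operator `bilinear_tensor_product` to `bilinear` (and `SliceGradRawKernel` to `SliceGradKernel`); the public Python API `paddle.nn.functional.bilinear` is unchanged. A minimal sketch of the forward call, with shapes as documented for that API (assumes an installed paddle build):

```python
import paddle
import paddle.nn.functional as F

x1 = paddle.randn([4, 5])    # [batch, in1_features]
x2 = paddle.randn([4, 6])    # [batch, in2_features]
w = paddle.randn([3, 5, 6])  # [out_features, in1_features, in2_features]
b = paddle.randn([1, 3])     # [1, out_features]
out = F.bilinear(x1, x2, w, b)
print(out.shape)             # [4, 3]
```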
@@ -158,6 +158,15 @@
   data_transform :
     skip_transform : out_size, size_tensor, scale_tensor
 
+- backward_op : bilinear_grad
+  forward : bilinear (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
+  infer_meta :
+    func : BilinearGradInferMeta
+  kernel :
+    func : bilinear_grad
+
 - backward_op : bilinear_interp_grad
   forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
@@ -173,15 +182,6 @@
   data_transform :
     skip_transform : out_size, size_tensor, scale_tensor
 
-- backward_op : bilinear_tensor_product_grad
-  forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
-  output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
-  infer_meta :
-    func : BilinearTensorProductGradInferMeta
-  kernel :
-    func : bilinear_grad
-
 - backward_op : bmm_grad
   forward : bmm (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
......
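The `bilinear_grad` op registered above produces the gradients of `out[n, k] = sum_ij x[n, i] * weight[k, i, j] * y[n, j] + bias[k]`. A numpy reference for those four outputs (an illustrative sketch, not the registered kernel):

```python
import numpy as np

def bilinear_grad_reference(x, y, weight, dout):
    """Reference gradients for out[n,k] = sum_ij x[n,i] * weight[k,i,j] * y[n,j] + bias[k]."""
    dx = np.einsum('nk,kij,nj->ni', dout, weight, y)   # x_grad
    dy = np.einsum('nk,kij,ni->nj', dout, weight, x)   # y_grad
    dweight = np.einsum('nk,ni,nj->kij', dout, x, y)   # weight_grad
    dbias = dout.sum(axis=0)                           # bias_grad
    return dx, dy, dweight, dbias
```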
@@ -265,6 +265,12 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- op : bilinear (bilinear_tensor_product)
+  inputs :
+    {x : X, y : Y, weight : Weight, bias : Bias}
+  outputs :
+    {out : Out}
+
 - op : bilinear_interp (bilinear_interp_v2)
   backward : bilinear_interp_grad (bilinear_interp_v2_grad)
   inputs :
@@ -274,12 +280,6 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
-- op : bilinear_tensor_product
-  inputs :
-    {x : X, y : Y, weight : Weight, bias : Bias}
-  outputs :
-    {out : Out}
-
 - op : bitwise_and
   inputs :
     {x : X, y : Y}
......
@@ -238,6 +238,16 @@
   data_transform :
     skip_transform : out_size, size_tensor, scale_tensor
 
+- op : bilinear
+  args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
+  output : Tensor
+  infer_meta :
+    func : BilinearInferMeta
+  kernel :
+    func : bilinear
+  optional : bias
+  backward : bilinear_grad
+
 - op : bilinear_interp
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
   output : Tensor(output)
@@ -251,16 +261,6 @@
   data_transform :
     skip_transform : out_size, size_tensor, scale_tensor
 
-- op : bilinear_tensor_product
-  args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
-  output : Tensor
-  infer_meta :
-    func : BilinearInferMeta
-  kernel :
-    func : bilinear
-  optional : bias
-  backward : bilinear_tensor_product_grad
-
 - op : bitwise_and
   args : (Tensor x, Tensor y)
   output : Tensor(out)
......
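Because the definition above marks `bias` as optional, the forward can be invoked without it; a minimal dygraph sketch (assumes an installed paddle build):

```python
import paddle
import paddle.nn.functional as F

out = F.bilinear(paddle.randn([2, 5]), paddle.randn([2, 4]),
                 paddle.randn([3, 5, 4]))  # bias defaults to None
```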
@@ -39,14 +39,14 @@ void AngleGradInferMeta(const MetaTensor& x,
   UnchangedInferMeta(x, x_grad);
 }
 
-void BilinearTensorProductGradInferMeta(const MetaTensor& x,
-                                        const MetaTensor& y,
-                                        const MetaTensor& weight,
-                                        const MetaTensor& dout,
-                                        MetaTensor* dx,
-                                        MetaTensor* dy,
-                                        MetaTensor* dweight,
-                                        MetaTensor* dbias) {
+void BilinearGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& y,
+                           const MetaTensor& weight,
+                           const MetaTensor& dout,
+                           MetaTensor* dx,
+                           MetaTensor* dy,
+                           MetaTensor* dweight,
+                           MetaTensor* dbias) {
   auto x_dims = x.dims();
   auto y_dims = y.dims();
   auto weight_dims = weight.dims();
......
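`BilinearGradInferMeta` only changes its name in this commit; the shape contract it enforces is the usual one where each gradient mirrors its forward input. A hedged sketch of that contract (`check_bilinear_grad_shapes` is illustrative, not a Paddle symbol):

```python
def check_bilinear_grad_shapes(x, y, weight, dx, dy, dweight, dbias):
    # Each gradient takes the shape of the corresponding forward input.
    assert dx.shape == x.shape
    assert dy.shape == y.shape
    assert dweight.shape == weight.shape
    assert dbias.shape == (1, weight.shape[0])  # bias is [1, out_features]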
@@ -36,14 +36,14 @@ void AngleGradInferMeta(const MetaTensor& x,
                         const MetaTensor& out_grad,
                         MetaTensor* x_grad);
 
-void BilinearTensorProductGradInferMeta(const MetaTensor& x,
-                                        const MetaTensor& y,
-                                        const MetaTensor& weight,
-                                        const MetaTensor& dout,
-                                        MetaTensor* dx,
-                                        MetaTensor* dy,
-                                        MetaTensor* dweight,
-                                        MetaTensor* dbias);
+void BilinearGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& y,
+                           const MetaTensor& weight,
+                           const MetaTensor& dout,
+                           MetaTensor* dx,
+                           MetaTensor* dy,
+                           MetaTensor* dweight,
+                           MetaTensor* dbias);
 
 void BmmGradInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
@@ -21,7 +21,7 @@
 PD_REGISTER_KERNEL(slice_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::SliceGradRawKernel,
+                   phi::SliceGradKernel,
                    bool,
                    uint8_t,
                    int,
......
@@ -21,7 +21,7 @@
 PD_REGISTER_KERNEL(slice_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::SliceGradRawKernel,
+                   phi::SliceGradKernel,
                    bool,
                    uint8_t,
                    int,
......
@@ -271,15 +271,15 @@ void SliceGradCompute(const Context& ctx,
 }
 
 template <typename T, typename Context>
-void SliceGradRawKernel(const Context& ctx,
-                        const DenseTensor& input,
-                        const DenseTensor& out_grad,
-                        const std::vector<int64_t>& axes,
-                        const IntArray& starts_arr,
-                        const IntArray& ends_arr,
-                        const std::vector<int64_t>& infer_flags,
-                        const std::vector<int64_t>& decrease_axis,
-                        DenseTensor* input_grad) {
+void SliceGradKernel(const Context& ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& out_grad,
+                     const std::vector<int64_t>& axes,
+                     const IntArray& starts_arr,
+                     const IntArray& ends_arr,
+                     const std::vector<int64_t>& infer_flags,
+                     const std::vector<int64_t>& decrease_axis,
+                     DenseTensor* input_grad) {
   size_t rank = input.dims().size();
   auto& starts = starts_arr.GetData();
......
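The `SliceGradKernel` rename above is mechanical; the semantics are unchanged: the gradient of slice scatters `out_grad` back into a zero tensor of the input's shape. A single-axis numpy sketch of that behavior (illustrative only):

```python
import numpy as np

def slice_grad_reference(input_shape, out_grad, axis, start, end):
    # Gradient of slice: place out_grad into the sliced window, zeros elsewhere.
    dx = np.zeros(input_shape, dtype=out_grad.dtype)
    index = [slice(None)] * len(input_shape)
    index[axis] = slice(start, end)
    dx[tuple(index)] = out_grad
    return dx
```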
@@ -20,15 +20,15 @@
 namespace phi {
 
 template <typename T, typename Context>
-void SliceGradRawKernel(const Context& dev_ctx,
-                        const DenseTensor& input,
-                        const DenseTensor& out_grad,
-                        const std::vector<int64_t>& axes,
-                        const IntArray& starts,
-                        const IntArray& ends,
-                        const std::vector<int64_t>& infer_flags,
-                        const std::vector<int64_t>& decrease_axis,
-                        DenseTensor* input_grad) {
+void SliceGradKernel(const Context& dev_ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& out_grad,
+                     const std::vector<int64_t>& axes,
+                     const IntArray& starts,
+                     const IntArray& ends,
+                     const std::vector<int64_t>& infer_flags,
+                     const std::vector<int64_t>& decrease_axis,
+                     DenseTensor* input_grad) {
   const auto& onednn_engine = dev_ctx.GetEngine();
 
   auto dx_dims = vectorize(input_grad->dims());
@@ -81,6 +81,6 @@ void SliceGradKernel(const Context& dev_ctx,
 PD_REGISTER_KERNEL(slice_grad,
                    OneDNN,
                    ONEDNN,
-                   phi::SliceGradRawKernel,
+                   phi::SliceGradKernel,
                    float,
                    phi::dtype::bfloat16) {}
@@ -21,15 +21,15 @@
 namespace phi {
 
 template <typename T, typename Context>
-void SliceGradRawKernel(const Context& ctx,
-                        const DenseTensor& input,
-                        const DenseTensor& out_grad,
-                        const std::vector<int64_t>& axes,
-                        const IntArray& starts,
-                        const IntArray& ends,
-                        const std::vector<int64_t>& infer_flags,
-                        const std::vector<int64_t>& decrease_axis,
-                        DenseTensor* input_grad);
+void SliceGradKernel(const Context& ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& out_grad,
+                     const std::vector<int64_t>& axes,
+                     const IntArray& starts,
+                     const IntArray& ends,
+                     const std::vector<int64_t>& infer_flags,
+                     const std::vector<int64_t>& decrease_axis,
+                     DenseTensor* input_grad);
 
 template <typename T, typename Context>
 void SliceArrayGradKernel(const Context& dev_ctx,
@@ -21,15 +21,15 @@
 namespace phi {
 
 template <typename T, typename Context>
-void SliceGradRawKernel(const Context& ctx,
-                        const DenseTensor& input,
-                        const DenseTensor& out_grad,
-                        const std::vector<int64_t>& axes,
-                        const IntArray& starts_t,
-                        const IntArray& ends_t,
-                        const std::vector<int64_t>& infer_flags,
-                        const std::vector<int64_t>& decrease_axis,
-                        DenseTensor* input_grad) {
+void SliceGradKernel(const Context& ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& out_grad,
+                     const std::vector<int64_t>& axes,
+                     const IntArray& starts_t,
+                     const IntArray& ends_t,
+                     const std::vector<int64_t>& infer_flags,
+                     const std::vector<int64_t>& decrease_axis,
+                     DenseTensor* input_grad) {
   using XPUType = typename XPUTypeTrait<T>::Type;
 
   ctx.template Alloc<T>(input_grad);
@@ -82,7 +82,7 @@ void SliceGradKernel(const Context& ctx,
 PD_REGISTER_KERNEL(slice_grad,
                    XPU,
                    ALL_LAYOUT,
-                   phi::SliceGradRawKernel,
+                   phi::SliceGradKernel,
                    float,
                    int,
                    phi::dtype::float16) {}
@@ -942,7 +942,7 @@ def bilinear(x1, x2, weight, bias=None, name=None):
     """
 
     if in_dygraph_mode():
-        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
+        return _C_ops.bilinear(x1, x2, weight, bias)
     else:
         check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
         check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')
......
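After this change the dygraph branch dispatches to the renamed C++ op with identical behavior. A quick smoke test of forward and backward through the renamed path (assumes an installed paddle build):

```python
import paddle
import paddle.nn.functional as F

x1 = paddle.randn([2, 5])
x2 = paddle.randn([2, 4])
x1.stop_gradient = False
x2.stop_gradient = False
w = paddle.randn([3, 5, 4])

out = F.bilinear(x1, x2, w)
out.sum().backward()
print(out.shape, x1.grad.shape, x2.grad.shape)  # [2, 3] [2, 5] [2, 4]
```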