未验证 提交 eb93b5c9 编写于 作者: Z zhangyuqin1998 提交者: GitHub

rename_bilinear_tensor_op (#52745)

上级 5ab07e04
......@@ -158,6 +158,15 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- backward_op : bilinear_grad
forward : bilinear (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : BilinearGradInferMeta
kernel :
func : bilinear_grad
- backward_op : bilinear_interp_grad
forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) -> Tensor(output)
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
......@@ -173,15 +182,6 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- backward_op : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : BilinearTensorProductGradInferMeta
kernel :
func : bilinear_grad
- backward_op : bmm_grad
forward : bmm (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
......
......@@ -265,6 +265,12 @@
extra :
attrs : [bool use_mkldnn = false]
- op : bilinear (bilinear_tensor_product)
inputs :
{x : X, y : Y, weight : Weight, bias : Bias}
outputs :
{out : Out}
- op : bilinear_interp (bilinear_interp_v2)
backward : bilinear_interp_grad (bilinear_interp_v2_grad)
inputs :
......@@ -274,12 +280,6 @@
extra :
attrs : [bool use_mkldnn = false]
- op : bilinear_tensor_product
inputs :
{x : X, y : Y,weight: Weight, bias: Bias}
outputs :
{out : Out}
- op : bitwise_and
inputs :
{x : X, y : Y}
......
......@@ -238,6 +238,16 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- op : bilinear
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearInferMeta
kernel :
func : bilinear
optional : bias
backward : bilinear_grad
- op : bilinear_interp
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
output : Tensor(output)
......@@ -251,16 +261,6 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- op : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearInferMeta
kernel :
func : bilinear
optional : bias
backward : bilinear_tensor_product_grad
- op : bitwise_and
args : (Tensor x, Tensor y)
output : Tensor(out)
......
......@@ -39,14 +39,14 @@ void AngleGradInferMeta(const MetaTensor& x,
UnchangedInferMeta(x, x_grad);
}
void BilinearTensorProductGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& dout,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dweight,
MetaTensor* dbias) {
void BilinearGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& dout,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dweight,
MetaTensor* dbias) {
auto x_dims = x.dims();
auto y_dims = y.dims();
auto weight_dims = weight.dims();
......
......@@ -36,14 +36,14 @@ void AngleGradInferMeta(const MetaTensor& x,
const MetaTensor& out_grad,
MetaTensor* x_grad);
void BilinearTensorProductGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& dout,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dweight,
MetaTensor* dbias);
void BilinearGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& weight,
const MetaTensor& dout,
MetaTensor* dx,
MetaTensor* dy,
MetaTensor* dweight,
MetaTensor* dbias);
void BmmGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
......
......@@ -21,7 +21,7 @@
PD_REGISTER_KERNEL(slice_grad,
CPU,
ALL_LAYOUT,
phi::SliceGradRawKernel,
phi::SliceGradKernel,
bool,
uint8_t,
int,
......
......@@ -21,7 +21,7 @@
PD_REGISTER_KERNEL(slice_grad,
GPU,
ALL_LAYOUT,
phi::SliceGradRawKernel,
phi::SliceGradKernel,
bool,
uint8_t,
int,
......
......@@ -271,15 +271,15 @@ void SliceGradCompute(const Context& ctx,
}
template <typename T, typename Context>
void SliceGradRawKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts_arr,
const IntArray& ends_arr,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
void SliceGradKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts_arr,
const IntArray& ends_arr,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
size_t rank = input.dims().size();
auto& starts = starts_arr.GetData();
......
......@@ -20,15 +20,15 @@
namespace phi {
template <typename T, typename Context>
void SliceGradRawKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
void SliceGradKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
const auto& onednn_engine = dev_ctx.GetEngine();
auto dx_dims = vectorize(input_grad->dims());
......@@ -81,6 +81,6 @@ void SliceGradRawKernel(const Context& dev_ctx,
PD_REGISTER_KERNEL(slice_grad,
OneDNN,
ONEDNN,
phi::SliceGradRawKernel,
phi::SliceGradKernel,
float,
phi::dtype::bfloat16) {}
......@@ -21,15 +21,15 @@
namespace phi {
template <typename T, typename Context>
void SliceGradRawKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad);
void SliceGradKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad);
template <typename T, typename Context>
void SliceArrayGradKernel(const Context& dev_ctx,
......
......@@ -21,15 +21,15 @@
namespace phi {
template <typename T, typename Context>
void SliceGradRawKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts_t,
const IntArray& ends_t,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
void SliceGradKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts_t,
const IntArray& ends_t,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
DenseTensor* input_grad) {
using XPUType = typename XPUTypeTrait<T>::Type;
ctx.template Alloc<T>(input_grad);
......@@ -82,7 +82,7 @@ void SliceGradRawKernel(const Context& ctx,
PD_REGISTER_KERNEL(slice_grad,
XPU,
ALL_LAYOUT,
phi::SliceGradRawKernel,
phi::SliceGradKernel,
float,
int,
phi::dtype::float16) {}
......@@ -942,7 +942,7 @@ def bilinear(x1, x2, weight, bias=None, name=None):
"""
if in_dygraph_mode():
return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
return _C_ops.bilinear(x1, x2, weight, bias)
else:
check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册