Unverified commit 2c2137bb authored by H HongyuJia, committed by GitHub

[phi] Transfer fluid bilinear_interp_v2 to phi bilinear_interp (add yaml) (#45140)

* transfer bilinear op to phi, change base kernel name from bilinear_interp_v2 to bilinear_interp

* preserve linear_interp param

* fix cross-device import
Parent bcbb7a97
......@@ -364,6 +364,17 @@
data_type : x
backward : bicubic_interp_grad
- api : bilinear_interp
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
output : Tensor(output)
infer_meta :
func : InterpolateInferMeta
optional: out_size, size_tensor, scale_tensor
kernel :
func : bilinear_interp
data_type : x
backward : bilinear_interp_grad
- api : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
......
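This `api` entry is what phi's YAML code generator consumes; once generated, the op is callable from Python in eager mode as `paddle._C_ops.final_state_bilinear_interp`, which is exactly what the updated tests below call. A minimal dygraph sketch, assuming a Paddle build of this branch where the generated API exists:

```python
import paddle

# Sketch only: assumes a build of this branch with the YAML above
# code-generated into the eager final-state API.
x = paddle.rand([2, 3, 5, 5])  # NCHW input

# Positional arguments follow the YAML `args` order:
# (x, out_size, size_tensor, scale_tensor, data_layout,
#  out_d, out_h, out_w, scale, interp_method, align_corners, align_mode)
out = paddle._C_ops.final_state_bilinear_interp(
    x, None, None, None, 'NCHW', -1, 3, 3, [], 'bilinear', True, 1)
print(out.shape)  # expected [2, 3, 3, 3]
```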
......@@ -295,6 +295,18 @@
func : bicubic_interp_grad
data_type : output_grad
- backward_api : bilinear_interp_grad
forward : bilinear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
optional: out_size, size_tensor, scale_tensor
kernel :
func : bilinear_interp_grad
data_type : output_grad
- backward_api : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
......
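The backward entry wires `output_grad` back to `x_grad` through the `bilinear_interp_grad` kernel, so gradients should flow through the generated eager API as well. A hedged sketch, assuming the generated op records its backward node as declared above:

```python
import paddle

x = paddle.rand([1, 1, 4, 4])
x.stop_gradient = False

# Same positional order as the forward YAML entry.
out = paddle._C_ops.final_state_bilinear_interp(
    x, None, None, None, 'NCHW', -1, 8, 8, [], 'bilinear', False, 1)

# bilinear_interp_grad should produce x_grad with the shape of x.
(x_grad,) = paddle.grad([out.sum()], [x])
print(x_grad.shape)  # expected [1, 1, 4, 4]
```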
......@@ -74,6 +74,8 @@ const std::unordered_set<std::string> deprecated_op_names(
"top_k_grad",
"linear_interp",
"linear_interp_grad",
"bilinear_interp",
"bilinear_interp_grad",
"nearest_interp",
"nearest_interp_grad",
"bicubic_interp",
......
......@@ -1036,7 +1036,7 @@ void BicubicInterpGradKernel(
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2_grad,
PD_REGISTER_KERNEL(bilinear_interp_grad,
CPU,
ALL_LAYOUT,
phi::BilinearInterpGradKernel,
......
......@@ -1187,7 +1187,7 @@ void BicubicInterpKernel(
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2,
PD_REGISTER_KERNEL(bilinear_interp,
CPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
......
......@@ -1569,7 +1569,7 @@ void BicubicInterpGradKernel(
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2_grad,
PD_REGISTER_KERNEL(bilinear_interp_grad,
GPU,
ALL_LAYOUT,
phi::BilinearInterpGradKernel,
......
......@@ -1440,7 +1440,7 @@ void BicubicInterpKernel(
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2,
PD_REGISTER_KERNEL(bilinear_interp,
GPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
......
......@@ -18,7 +18,7 @@ namespace phi {
KernelSignature BilinearInterpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bilinear_interp_v2",
return KernelSignature("bilinear_interp",
{"X", "OutSize", "SizeTensor", "Scale"},
{"data_layout",
"out_d",
......@@ -92,7 +92,7 @@ KernelSignature BicubicInterpOpArgumentMapping(
KernelSignature BilinearInterpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("bilinear_interp_v2_grad",
return KernelSignature("bilinear_interp_grad",
{"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
{"data_layout",
"out_d",
......@@ -167,10 +167,12 @@ KernelSignature BicubicInterpGradOpArgumentMapping(
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp);
PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2, bilinear_interp);
PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2, nearest_interp);
PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2, bicubic_interp);
PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2_grad, bilinear_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2_grad, nearest_interp_grad);
PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2_grad, bicubic_interp_grad);
......
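The argument mappings plus `PD_REGISTER_BASE_KERNEL_NAME(bilinear_interp_v2, bilinear_interp)` form the compatibility path: static-graph programs still record the legacy `bilinear_interp_v2` op, and at execution time it is routed to the renamed phi kernel. A small static-graph sketch that should exercise that path (standard Paddle APIs; nothing here is specific to this branch beyond which kernel ends up running):

```python
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data('x', shape=[2, 3, 5, 5], dtype='float32')
    # Static graph still emits the legacy bilinear_interp_v2 op;
    # the compat mapping above dispatches it to phi's bilinear_interp.
    y = F.interpolate(x, size=[3, 3], mode='bilinear', align_corners=True)

exe = paddle.static.Executor(paddle.CPUPlace())
out, = exe.run(main,
               feed={'x': np.random.rand(2, 3, 5, 5).astype('float32')},
               fetch_list=[y])
print(out.shape)  # expected (2, 3, 3, 3)
```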
......@@ -23,7 +23,6 @@ import paddle
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import interpolate
from paddle._C_ops import final_state_bicubic_interp
def bicubic_interp_test(x,
......@@ -49,9 +48,11 @@ def bicubic_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return final_state_bicubic_interp(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w, scale,
interp_method, align_corners, align_mode)
return paddle._C_ops.final_state_bicubic_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method,
align_corners, align_mode)
def cubic_1(x, a):
......
......@@ -23,6 +23,36 @@ from paddle.nn.functional import interpolate
import paddle
def bilinear_interp_test(x,
OutSize=None,
SizeTensor=None,
Scale=None,
data_layout='NCHW',
out_d=-1,
out_h=-1,
out_w=-1,
scale=[],
interp_method='linear',
align_corners=False,
align_mode=1):
if isinstance(scale, float) or isinstance(scale, int):
scale_list = []
for _ in range(len(x.shape) - 2):
scale_list.append(scale)
scale = list(map(float, scale_list))
elif isinstance(scale, list) or isinstance(scale, tuple):
scale = list(map(float, scale))
if SizeTensor is not None:
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return paddle._C_ops.final_state_bilinear_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method,
align_corners, align_mode)
def bilinear_interp_np(input,
out_h,
out_w,
......@@ -106,6 +136,7 @@ def bilinear_interp_np(input,
class TestBilinearInterpOp(OpTest):
def setUp(self):
self.python_api = bilinear_interp_test
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
......@@ -164,17 +195,17 @@ class TestBilinearInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True)
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.scale = []
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -187,7 +218,7 @@ class TestBilinearInterpCase1(TestBilinearInterpOp):
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -199,7 +230,7 @@ class TestBilinearInterpCase2(TestBilinearInterpOp):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -211,7 +242,7 @@ class TestBilinearInterpCase3(TestBilinearInterpOp):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -223,7 +254,7 @@ class TestBilinearInterpCase4(TestBilinearInterpOp):
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.scale = []
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -236,7 +267,7 @@ class TestBilinearInterpCase5(TestBilinearInterpOp):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.scale = []
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -249,7 +280,7 @@ class TestBilinearInterpCase6(TestBilinearInterpOp):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.scale = []
self.out_size = np.array([65, 33]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -274,7 +305,7 @@ class TestBilinearInterpSame(TestBilinearInterpOp):
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -286,7 +317,7 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp):
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.scale = []
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -299,7 +330,7 @@ class TestBilinearInterpDataLayout(TestBilinearInterpOp):
self.input_shape = [2, 5, 5, 3]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.scale = []
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -309,6 +340,7 @@ class TestBilinearInterpDataLayout(TestBilinearInterpOp):
class TestBilinearInterpOpUint8(OpTest):
def setUp(self):
self.python_api = bilinear_interp_test
self.out_size = None
self.actual_shape = None
self.init_test_case()
......@@ -355,14 +387,16 @@ class TestBilinearInterpOpUint8(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(place=core.CPUPlace(), atol=1)
self.check_output_with_place(place=core.CPUPlace(),
atol=1,
check_eager=True)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 3, 9, 6]
self.out_h = 10
self.out_w = 9
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -374,7 +408,7 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
self.input_shape = [2, 3, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.scale = []
self.align_corners = True
self.align_mode = 1
......@@ -386,7 +420,7 @@ class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
self.input_shape = [4, 1, 7, 8]
self.out_h = 5
self.out_w = 13
self.scale = 0.
self.scale = []
self.out_size = np.array([6, 15]).astype("int32")
self.align_corners = True
self.align_mode = 1
......@@ -476,6 +510,7 @@ class TestBilinearInterpZero(TestBilinearInterpOp):
class TestBilinearInterpOp_attr_tensor(OpTest):
def setUp(self):
self.python_api = bilinear_interp_test
self.out_size = None
self.actual_shape = None
self.init_test_case()
......@@ -531,17 +566,17 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True)
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 3
self.out_w = 3
self.scale = 0.
self.scale = []
self.out_size = [3, 3]
self.align_corners = True
......@@ -554,7 +589,7 @@ class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.scale = []
self.out_size = [8, 12]
self.align_corners = True
......@@ -567,7 +602,7 @@ class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor):
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.scale = []
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.shape_by_1Dtensor = True
......
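Setting `self.python_api = bilinear_interp_test` together with `check_eager=True` makes OpTest run the eager final-state API through this helper and compare it against the legacy op's output. The helper can also be called directly; a hedged sketch (the test module name is assumed, not shown in this diff):

```python
import paddle
# Hypothetical import: the module name is an assumption.
from test_bilinear_interp_v2_op import bilinear_interp_test

x = paddle.rand([2, 3, 5, 5])
out = bilinear_interp_test(x, out_h=3, out_w=3,
                           interp_method='bilinear',
                           align_corners=True, align_mode=1)
print(out.shape)  # expected [2, 3, 3, 3]
```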
......@@ -22,7 +22,6 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.nn.functional import interpolate
from paddle._C_ops import final_state_linear_interp
def linear_interp_test(x,
......@@ -48,9 +47,11 @@ def linear_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return final_state_linear_interp(x, OutSize, SizeTensor, Scale, data_layout,
out_d, out_h, out_w, scale, interp_method,
align_corners, align_mode)
return paddle._C_ops.final_state_linear_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method, align_corners,
align_mode)
def linear_interp_np(input,
......
......@@ -22,7 +22,6 @@ import paddle.fluid as fluid
import paddle.nn as nn
import paddle
from paddle.nn.functional import interpolate
from paddle import _C_ops
paddle.enable_static()
......@@ -50,9 +49,10 @@ def nearest_interp_test(x,
if not isinstance(SizeTensor, list) and not isinstance(
SizeTensor, tuple):
SizeTensor = [SizeTensor]
return _C_ops.final_state_nearest_interp(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w,
scale, interp_method,
return paddle._C_ops.final_state_nearest_interp(x, OutSize, SizeTensor,
Scale, data_layout, out_d,
out_h, out_w, scale,
interp_method,
align_corners, align_mode)
......
......@@ -603,6 +603,16 @@ def interpolate(x,
else:
out = _C_ops.linear_interp_v2(x, *dy_attr)
elif resample_type == "bilinear":
if in_dygraph_mode():
out = _C_ops.final_state_bilinear_interp(
x, inputs['OutSize'] if 'OutSize' in inputs else None,
inputs['SizeTensor'] if 'SizeTensor' in inputs else None,
inputs['Scale'] if 'Scale' in inputs else None,
attrs['data_layout'], attrs['out_d'], attrs['out_h'],
attrs['out_w'], attrs['scale'] if 'scale' in attrs else [],
attrs['interp_method'], attrs['align_corners'],
attrs['align_mode'])
else:
out = _C_ops.bilinear_interp_v2(x, *dy_attr)
elif resample_type == "trilinear":
out = _C_ops.trilinear_interp_v2(x, *dy_attr)
......
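With this change, `paddle.nn.functional.interpolate` dispatches to `final_state_bilinear_interp` in the new eager mode, while the legacy dygraph and static paths keep using `bilinear_interp_v2`. User code is unchanged; a quick usage sketch:

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 16, 16])
# In eager mode this now runs through phi's bilinear_interp kernel.
y = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
print(y.shape)  # expected [1, 3, 32, 32]
```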