Unverified commit f4da2d4d, authored by HongyuJia, committed by GitHub

[phi] Transfer fluid bicubic_interp_v2 to phi bicubic_interp (add yaml) (#45151)

* transfer bicubic_interp op to phi, change name from bicubic_interp_v2 to bicubic_interp

* test final_state_bicubic_interp api

* testcase match imperative case
Parent 951010a2
@@ -353,6 +353,17 @@
     func : bce_loss
   backward : bce_loss_grad
 
+- api : bicubic_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bicubic_interp
+    data_type : x
+  backward : bicubic_interp_grad
+
 - api : bilinear_tensor_product
   args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
   output : Tensor
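For orientation, the yaml entry above is what Paddle's op code generator consumes to emit the eager "final state" entry point. A minimal sketch of calling that generated function directly in dygraph mode — argument order taken from the `args` line above, the three optional tensor inputs passed as `None`, and assuming the binding is importable as `paddle._C_ops.final_state_bicubic_interp` on this branch, as the test changes below suggest:

```python
import numpy as np
import paddle
from paddle import _C_ops

paddle.disable_static()  # the generated entry point is a dygraph API

x = paddle.to_tensor(np.random.rand(2, 3, 6, 6).astype("float32"))
# Positional arguments mirror the yaml `args` line: x, out_size,
# size_tensor, scale_tensor, then the attributes in declaration order.
out = _C_ops.final_state_bicubic_interp(x, None, None, None, 'NCHW', -1, 12,
                                        12, [], 'bicubic', False, 1)
print(out.shape)  # expected: [2, 3, 12, 12]
```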
@@ -283,6 +283,18 @@
     func : bce_loss_grad
   inplace : (out_grad -> input_grad)
 
+- backward_api : bicubic_interp_grad
+  forward : bicubic_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : bicubic_interp_grad
+    data_type : output_grad
+
 - backward_api : bilinear_tensor_product_grad
   forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
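Because the backward entry above consumes `output_grad` and produces `x_grad`, a quick way to see it exercised end to end is a dygraph gradient pass through the public API (a sketch for illustration, not part of the commit):

```python
import paddle
import paddle.nn.functional as F

paddle.disable_static()

x = paddle.rand([2, 3, 6, 6], dtype='float32')
x.stop_gradient = False
y = F.interpolate(x, scale_factor=2, mode='bicubic', align_corners=False)
y.sum().backward()   # routes through bicubic_interp_grad under dygraph
print(x.grad.shape)  # [2, 3, 6, 6]
```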
@@ -75,7 +75,9 @@ const std::unordered_set<std::string> deprecated_op_names(
      "linear_interp",
      "linear_interp_grad",
      "nearest_interp",
-     "nearest_interp_grad"});
+     "nearest_interp_grad",
+     "bicubic_interp",
+     "bicubic_interp_grad"});
 
 class DefaultKernelSignatureMap {
  public:
@@ -1072,7 +1072,7 @@ PD_REGISTER_KERNEL(linear_interp_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2_grad,
+PD_REGISTER_KERNEL(bicubic_interp_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpGradKernel,
@@ -1229,12 +1229,8 @@ PD_REGISTER_KERNEL(linear_interp,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2,
-                   CPU,
-                   ALL_LAYOUT,
-                   phi::BicubicInterpKernel,
-                   float,
-                   double) {
+PD_REGISTER_KERNEL(
+    bicubic_interp, CPU, ALL_LAYOUT, phi::BicubicInterpKernel, float, double) {
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
@@ -1605,7 +1605,7 @@ PD_REGISTER_KERNEL(linear_interp_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2_grad,
+PD_REGISTER_KERNEL(bicubic_interp_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpGradKernel,
@@ -1481,7 +1481,7 @@ PD_REGISTER_KERNEL(linear_interp,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(bicubic_interp_v2,
+PD_REGISTER_KERNEL(bicubic_interp,
                    GPU,
                    ALL_LAYOUT,
                    phi::BicubicInterpKernel,
@@ -77,7 +77,7 @@ KernelSignature LinearInterpOpArgumentMapping(
 KernelSignature BicubicInterpOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("bicubic_interp_v2",
+  return KernelSignature("bicubic_interp",
                          {"X", "OutSize", "SizeTensor", "Scale"},
                          {"data_layout",
                           "out_d",
@@ -151,7 +151,7 @@ KernelSignature LinearInterpGradOpArgumentMapping(
 KernelSignature BicubicInterpGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("bicubic_interp_v2_grad",
+  return KernelSignature("bicubic_interp_grad",
                          {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
                          {"data_layout",
                           "out_d",
@@ -168,9 +168,11 @@ KernelSignature BicubicInterpGradOpArgumentMapping(
 PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp);
 PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2, nearest_interp);
+PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2, bicubic_interp);
 PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad);
 PD_REGISTER_BASE_KERNEL_NAME(nearest_interp_v2_grad, nearest_interp_grad);
+PD_REGISTER_BASE_KERNEL_NAME(bicubic_interp_v2_grad, bicubic_interp_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2,
                            phi::BilinearInterpOpArgumentMapping);
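The `PD_REGISTER_BASE_KERNEL_NAME` lines above are what keep the legacy op name working: a static-graph program still records a `bicubic_interp_v2` op, and the mapping resolves it to the phi `bicubic_interp` kernel at execution time. A small sketch of that unchanged user-facing path:

```python
import paddle
import paddle.nn.functional as F

paddle.enable_static()

# The program built here still contains a `bicubic_interp_v2` op; the
# base-name registration above routes it to the phi `bicubic_interp`
# kernel when the program is executed.
x = paddle.static.data(name='x', shape=[1, 3, 16, 16], dtype='float32')
y = F.interpolate(x, size=[32, 32], mode='bicubic', align_corners=False)
```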
@@ -21,7 +21,37 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 import paddle
 from paddle.fluid import Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 from paddle.nn.functional import interpolate
+from paddle._C_ops import final_state_bicubic_interp
+
+
+def bicubic_interp_test(x,
+                        OutSize=None,
+                        SizeTensor=None,
+                        Scale=None,
+                        data_layout='NCHW',
+                        out_d=-1,
+                        out_h=-1,
+                        out_w=-1,
+                        scale=[],
+                        interp_method='linear',
+                        align_corners=False,
+                        align_mode=1):
+    if isinstance(scale, float) or isinstance(scale, int):
+        scale_list = []
+        for _ in range(len(x.shape) - 2):
+            scale_list.append(scale)
+        scale = list(map(float, scale_list))
+    elif isinstance(scale, list) or isinstance(scale, tuple):
+        scale = list(map(float, scale))
+    if SizeTensor is not None:
+        if not isinstance(SizeTensor, list) and not isinstance(
+                SizeTensor, tuple):
+            SizeTensor = [SizeTensor]
+    return final_state_bicubic_interp(x, OutSize, SizeTensor, Scale,
+                                      data_layout, out_d, out_h, out_w, scale,
+                                      interp_method, align_corners, align_mode)
+
 
 def cubic_1(x, a):
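The helper above normalizes a scalar or tuple `scale` into a list of floats and wraps a lone `SizeTensor` in a list before forwarding everything positionally to `final_state_bicubic_interp`, which is the calling shape `OpTest` expects of a `python_api`. A hypothetical direct call, assuming the kernel derives the output size from `scale` when `out_h`/`out_w` are left at -1:

```python
import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.rand(2, 3, 6, 6).astype("float64"))
# The scalar 2.0 is expanded to [2.0, 2.0], one entry per spatial dim.
out = bicubic_interp_test(x, scale=2.0, interp_method='bicubic',
                          align_corners=True)
print(out.shape)  # expected: [2, 3, 12, 12]
```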
@@ -133,6 +163,7 @@ def bicubic_interp_np(input,
 class TestBicubicInterpOp(OpTest):
 
     def setUp(self):
+        self.python_api = bicubic_interp_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
@@ -140,8 +171,7 @@ class TestBicubicInterpOp(OpTest):
         self.op_type = "bicubic_interp_v2"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
         # Skip check_eager while found them in Inputs.
-        # TODO(dev): add self.python_api
-        self.check_eager = False
+        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float64")
         scale_h = 0
         scale_w = 0
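With `python_api` set in `setUp` and `check_eager` flipped to `True`, the standard checks can validate the eager path alongside the static-graph op. The actual check methods fall outside this hunk; a simplified sketch of how such op tests typically consume the two attributes (class name `TestBicubicInterpOpChecks` is hypothetical):

```python
class TestBicubicInterpOpChecks(TestBicubicInterpOp):
    # Sketch only: check_eager=True replays the case through the
    # registered python_api, i.e. bicubic_interp_test above.

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', in_place=True,
                        check_eager=self.check_eager)
```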
@@ -208,7 +238,7 @@
         self.input_shape = [2, 3, 5, 5]
         self.out_h = 2
         self.out_w = 2
-        self.scale = 0.
+        self.scale = []
         self.out_size = np.array([3, 3]).astype("int32")
         self.align_corners = True
@@ -220,7 +250,7 @@ class TestBicubicInterpCase1(TestBicubicInterpOp):
         self.input_shape = [4, 1, 7, 8]
         self.out_h = 1
         self.out_w = 1
-        self.scale = 0.
+        self.scale = []
         self.align_corners = True
@@ -231,7 +261,7 @@ class TestBicubicInterpCase2(TestBicubicInterpOp):
         self.input_shape = [3, 3, 9, 6]
         self.out_h = 10
         self.out_w = 8
-        self.scale = 0.
+        self.scale = []
         self.align_corners = True
@@ -242,7 +272,7 @@ class TestBicubicInterpCase3(TestBicubicInterpOp):
         self.input_shape = [1, 1, 32, 64]
         self.out_h = 64
         self.out_w = 32
-        self.scale = 0.
+        self.scale = []
         self.align_corners = False
@@ -253,7 +283,7 @@ class TestBicubicInterpCase4(TestBicubicInterpOp):
         self.input_shape = [4, 1, 7, 8]
         self.out_h = 1
         self.out_w = 1
-        self.scale = 0.
+        self.scale = []
         self.out_size = np.array([2, 2]).astype("int32")
         self.align_corners = True
@@ -265,7 +295,7 @@ class TestBicubicInterpCase5(TestBicubicInterpOp):
         self.input_shape = [3, 3, 9, 6]
         self.out_h = 11
         self.out_w = 11
-        self.scale = 0.
+        self.scale = []
         self.out_size = np.array([6, 4]).astype("int32")
         self.align_corners = False
@@ -289,7 +319,7 @@ class TestBicubicInterpSame(TestBicubicInterpOp):
         self.input_shape = [2, 3, 32, 64]
         self.out_h = 32
         self.out_w = 64
-        self.scale = 0.
+        self.scale = []
         self.align_corners = True
@@ -311,7 +341,7 @@ class TestBicubicInterpDataLayout(TestBicubicInterpOp):
         self.input_shape = [2, 5, 5, 3]
         self.out_h = 2
         self.out_w = 2
-        self.scale = 0.
+        self.scale = []
         self.out_size = np.array([3, 3]).astype("int32")
         self.align_corners = True
         self.data_layout = "NHWC"
@@ -319,7 +349,12 @@ class TestBicubicInterpDataLayout(TestBicubicInterpOp):
 class TestBicubicInterpOpAPI(unittest.TestCase):
 
-    def test_case(self):
+    def test_imperative_case(self):
+        with _test_eager_guard():
+            self.func_case()
+        self.func_case()
+
+    def func_case(self):
         np.random.seed(200)
         x_data = np.random.random((2, 3, 6, 6)).astype("float32")
         dim_data = np.array([12]).astype("int32")
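The rename above splits the test body into `func_case` so the same assertions run twice: once under `_test_eager_guard()` (the new eager mode, which hits the phi kernel) and once in legacy dygraph. The pattern in isolation, as a schematic:

```python
from paddle.fluid.framework import _test_eager_guard

def run_in_both_modes(case_fn):
    with _test_eager_guard():  # eager (final-state) dygraph
        case_fn()
    case_fn()                  # legacy dygraph
```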
@@ -413,11 +448,10 @@ class TestBicubicInterpOpAPI(unittest.TestCase):
 class TestBicubicOpError(unittest.TestCase):
 
-    def test_errors(self):
-        with program_guard(Program(), Program()):
+    def test_imperative_errors(self):
         # the input of interpoalte must be Variable.
-        x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
-                                     [[1, 1, 1, 1]], fluid.CPUPlace())
+        x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
+                                     fluid.CPUPlace())
         self.assertRaises(TypeError, interpolate, x1)
 
         def test_mode_type():
@@ -442,10 +476,7 @@ class TestBicubicOpError(unittest.TestCase):
         def test_out_shape():
             x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
-            out = interpolate(x,
-                              size=[12],
-                              mode='bicubic',
-                              align_corners=False)
+            out = interpolate(x, size=[12], mode='bicubic', align_corners=False)
 
         def test_attr_data_format():
             # for 5-D input, data_format only can be NCDHW or NDHWC
@@ -459,8 +490,8 @@ class TestBicubicOpError(unittest.TestCase):
         def test_actual_shape():
             # the actual_shape must be Variable.
-            x = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
-                                        [[1, 1, 1, 1]], fluid.CPUPlace())
+            x = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
+                                        fluid.CPUPlace())
             out = interpolate(x,
                               size=[12, 12],
                               mode='BICUBIC',
@@ -488,9 +519,8 @@ class TestBicubicOpError(unittest.TestCase):
         def test_scale_type():
             # the scale must be greater than zero.
             x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
-            scale = fluid.create_lod_tensor(np.array([-1, 3, 5,
-                                                      5]), [[1, 1, 1, 1]],
-                                            fluid.CPUPlace())
+            scale = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
+                                            [[1, 1, 1, 1]], fluid.CPUPlace())
             out = interpolate(x,
                               size=None,
                               mode='bicubic',
@@ -547,9 +577,7 @@ class TestBicubicOpError(unittest.TestCase):
                               scale_factor=None)
 
         def test_size_and_scale2():
-            x = fluid.data(name="input",
-                           shape=[2, 3, 6, 9, 4],
-                           dtype="float32")
+            x = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32")
             out = interpolate(x,
                               size=[2, 2, 2],
                               mode='trilinear',
@@ -589,6 +617,10 @@ class TestBicubicOpError(unittest.TestCase):
         self.assertRaises(TypeError, test_size_type)
         self.assertRaises(ValueError, test_input_shape_1)
 
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            self.test_imperative_errors()
+
 
 if __name__ == "__main__":
     paddle.enable_static()
@@ -619,6 +619,16 @@ def interpolate(x,
         else:
             out = _C_ops.nearest_interp_v2(x, *dy_attr)
     elif resample_type == "bicubic":
-        out = _C_ops.bicubic_interp_v2(x, *dy_attr)
+        if in_dygraph_mode():
+            out = _C_ops.final_state_bicubic_interp(
+                x, inputs['OutSize'] if 'OutSize' in inputs else None,
+                inputs['SizeTensor'] if 'SizeTensor' in inputs else None,
+                inputs['Scale'] if 'Scale' in inputs else None,
+                attrs['data_layout'], attrs['out_d'], attrs['out_h'],
+                attrs['out_w'], attrs['scale'] if 'scale' in attrs else [],
+                attrs['interp_method'], attrs['align_corners'],
+                attrs['align_mode'])
+        else:
+            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
         return out
 
     out = helper.create_variable_for_type_inference(dtype)
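Taken together, the dygraph branch above means ordinary user code is untouched by the migration; in eager mode it now lands in the phi kernel. For example:

```python
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.rand([1, 3, 32, 32])
# In eager dygraph mode this dispatches to final_state_bicubic_interp
# (phi `bicubic_interp`); otherwise to the fluid bicubic_interp_v2 op.
y = F.interpolate(x, size=[64, 64], mode='bicubic', align_corners=False)
print(y.shape)  # [1, 3, 64, 64]
```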