Unverified commit 6de3bdb3 authored by HongyuJia, committed by GitHub

[phi] change op name linear_interp_v2 to linear_interp (#45128)

* change name linear_interp_v2 to linear_interp

* fix deprecated_op_names

* deprecated_op_names add linear_interp_grad
Parent ac0553a0
@@ -1428,16 +1428,16 @@
   kernel :
     func : less_than
 
-- api : linear_interp_v2
+- api : linear_interp
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
   output : Tensor(output)
   infer_meta :
     func : InterpolateInferMeta
   optional: out_size, size_tensor, scale_tensor
   kernel :
-    func : linear_interp_v2
+    func : linear_interp
     data_type : x
-  backward : linear_interp_v2_grad
+  backward : linear_interp_grad
 
 - api : linspace
   args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
...
@@ -1213,8 +1213,8 @@
   kernel :
     func : lerp_grad
 
-- backward_api : linear_interp_v2_grad
-  forward : linear_interp_v2 (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
+- backward_api : linear_interp_grad
+  forward : linear_interp (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
   args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
   output : Tensor(x_grad)
   infer_meta :
@@ -1222,7 +1222,7 @@
     param: [x]
   optional: out_size, size_tensor, scale_tensor
   kernel :
-    func : linear_interp_v2_grad
+    func : linear_interp_grad
     data_type : output_grad
 
 - backward_api : log10_grad
...
@@ -40,37 +40,40 @@ const std::unordered_set<std::string> standard_kernel_suffixs({
  * after 2.0, and can no longer be occupied by the previously abandoned ops.
  * They are marked here uniformly.
  */
-const std::unordered_set<std::string> deprecated_op_names({"diag",
-                                                           "flatten",
-                                                           "flatten_grad",
-                                                           "isinf",
-                                                           "isnan",
-                                                           "unsqueeze",
-                                                           "unsqueeze_grad",
-                                                           "squeeze",
-                                                           "squeeze_grad",
-                                                           "isfinite",
-                                                           "matmul",
-                                                           "fill",
-                                                           "matmul_grad",
-                                                           "matmul_grad_grad",
-                                                           "max",
-                                                           "max_grad",
-                                                           "min",
-                                                           "min_grad",
-                                                           "prod",
-                                                           "prod_grad",
-                                                           "any",
-                                                           "all",
-                                                           "reshape",
-                                                           "reshape_grad",
-                                                           "expand",
-                                                           "expand_as",
-                                                           "expand_grad",
-                                                           "expand_as_grad",
-                                                           "one_hot",
-                                                           "top_k",
-                                                           "top_k_grad"});
+const std::unordered_set<std::string> deprecated_op_names(
+    {"diag",
+     "flatten",
+     "flatten_grad",
+     "isinf",
+     "isnan",
+     "unsqueeze",
+     "unsqueeze_grad",
+     "squeeze",
+     "squeeze_grad",
+     "isfinite",
+     "matmul",
+     "fill",
+     "matmul_grad",
+     "matmul_grad_grad",
+     "max",
+     "max_grad",
+     "min",
+     "min_grad",
+     "prod",
+     "prod_grad",
+     "any",
+     "all",
+     "reshape",
+     "reshape_grad",
+     "expand",
+     "expand_as",
+     "expand_grad",
+     "expand_as_grad",
+     "one_hot",
+     "top_k",
+     "top_k_grad",
+     "linear_interp",
+     "linear_interp_grad"});
 
 class DefaultKernelSignatureMap {
  public:
...
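Context for the hunk above: names in deprecated_op_names are reserved by phi after 2.0 and can no longer be taken by the abandoned fluid ops, so this change reserves the freshly claimed linear_interp and linear_interp_grad. A minimal standalone sketch of how such a set is consulted (the helper function below is illustrative, not part of this diff or of Paddle):

#include <string>
#include <unordered_set>

// Illustrative: a reserved-name set like the one in the diff above.
const std::unordered_set<std::string> reserved_names(
    {"linear_interp", "linear_interp_grad"});

// A legacy op may keep its name only if phi has not reserved it.
bool IsReservedByPhi(const std::string& op_name) {
  return reserved_names.count(op_name) > 0;
}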
@@ -1063,7 +1063,7 @@ PD_REGISTER_KERNEL(trilinear_interp_v2_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(linear_interp_v2_grad,
+PD_REGISTER_KERNEL(linear_interp_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::LinearInterpGradKernel,
...
@@ -1219,7 +1219,7 @@ PD_REGISTER_KERNEL(trilinear_interp_v2,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(linear_interp_v2,
+PD_REGISTER_KERNEL(linear_interp,
                    CPU,
                    ALL_LAYOUT,
                    phi::LinearInterpKernel,
...
@@ -1596,7 +1596,7 @@ PD_REGISTER_KERNEL(trilinear_interp_v2_grad,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(linear_interp_v2_grad,
+PD_REGISTER_KERNEL(linear_interp_grad,
                    GPU,
                    ALL_LAYOUT,
                    phi::LinearInterpGradKernel,
...
@@ -1471,7 +1471,7 @@ PD_REGISTER_KERNEL(trilinear_interp_v2,
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
 }
-PD_REGISTER_KERNEL(linear_interp_v2,
+PD_REGISTER_KERNEL(linear_interp,
                    GPU,
                    ALL_LAYOUT,
                    phi::LinearInterpKernel,
...
@@ -62,7 +62,7 @@ KernelSignature TrilinearInterpOpArgumentMapping(
 KernelSignature LinearInterpOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("linear_interp_v2",
+  return KernelSignature("linear_interp",
                          {"X", "OutSize", "SizeTensor", "Scale"},
                          {"data_layout",
                           "out_d",
@@ -136,7 +136,7 @@ KernelSignature TrilinearInterpGradOpArgumentMapping(
 KernelSignature LinearInterpGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("linear_interp_v2_grad",
+  return KernelSignature("linear_interp_grad",
                          {"X", "OutSize", "SizeTensor", "Scale", "Out@GRAD"},
                          {"data_layout",
                           "out_d",
@@ -166,6 +166,9 @@ KernelSignature BicubicInterpGradOpArgumentMapping(
 }  // namespace phi
 
+PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2, linear_interp);
+PD_REGISTER_BASE_KERNEL_NAME(linear_interp_v2_grad, linear_interp_grad);
+
 PD_REGISTER_ARG_MAPPING_FN(bilinear_interp_v2,
                            phi::BilinearInterpOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(nearest_interp_v2,
...
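The two PD_REGISTER_BASE_KERNEL_NAME lines added in the hunk above are what keep the old operator name working: they map the legacy fluid op name linear_interp_v2 to the new phi kernel base name linear_interp, so programs that still emit the old op resolve to the renamed kernel. A minimal sketch of such a rename registry, assuming a plain string-to-string map (this class and its method names are illustrative, not Paddle's actual implementation):

#include <string>
#include <unordered_map>
#include <utility>

// Illustrative registry: legacy op name -> phi kernel base name.
class OpNameMap {
 public:
  static OpNameMap& Instance() {
    static OpNameMap inst;  // one process-wide table
    return inst;
  }
  void Insert(std::string old_name, std::string new_name) {
    map_.emplace(std::move(old_name), std::move(new_name));
  }
  // Returns the renamed kernel name, or the input when no rename exists.
  std::string Lookup(const std::string& name) const {
    auto it = map_.find(name);
    return it == map_.end() ? name : it->second;
  }

 private:
  std::unordered_map<std::string, std::string> map_;
};

// Usage mirroring the two registrations above:
//   OpNameMap::Instance().Insert("linear_interp_v2", "linear_interp");
//   OpNameMap::Instance().Insert("linear_interp_v2_grad", "linear_interp_grad");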
@@ -22,21 +22,21 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.nn.functional import interpolate
-from paddle._C_ops import final_state_linear_interp_v2
+from paddle._C_ops import final_state_linear_interp
 
 
-def linear_interp_v2_test(x,
-                          OutSize=None,
-                          SizeTensor=None,
-                          Scale=None,
-                          data_layout='NCHW',
-                          out_d=-1,
-                          out_h=-1,
-                          out_w=-1,
-                          scale=0.0,
-                          interp_method='linear',
-                          align_corners=False,
-                          align_mode=1):
+def linear_interp_test(x,
+                       OutSize=None,
+                       SizeTensor=None,
+                       Scale=None,
+                       data_layout='NCHW',
+                       out_d=-1,
+                       out_h=-1,
+                       out_w=-1,
+                       scale=[],
+                       interp_method='linear',
+                       align_corners=False,
+                       align_mode=1):
     if isinstance(scale, float) or isinstance(scale, int):
         scale_list = []
         for _ in range(len(x.shape) - 2):
@@ -45,11 +45,12 @@ def linear_interp_v2_test(x,
     elif isinstance(scale, list) or isinstance(scale, tuple):
         scale = list(map(float, scale))
     if SizeTensor is not None:
-        SizeTensor = [SizeTensor]
-    return final_state_linear_interp_v2(x, OutSize, SizeTensor, Scale,
-                                        data_layout, out_d, out_h, out_w, scale,
-                                        interp_method, align_corners,
-                                        align_mode)
+        if not isinstance(SizeTensor, list) and not isinstance(
+                SizeTensor, tuple):
+            SizeTensor = [SizeTensor]
+    return final_state_linear_interp(x, OutSize, SizeTensor, Scale, data_layout,
+                                     out_d, out_h, out_w, scale, interp_method,
+                                     align_corners, align_mode)
 
 
 def linear_interp_np(input,
@@ -107,7 +108,7 @@ def linear_interp_np(input,
 class TestLinearInterpOp(OpTest):
 
     def setUp(self):
-        self.python_api = linear_interp_v2_test
+        self.python_api = linear_interp_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
@@ -219,7 +220,7 @@ class TestLinearInterpOpScale(TestLinearInterpOp):
 class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
 
     def setUp(self):
-        self.python_api = linear_interp_v2_test
+        self.python_api = linear_interp_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
...
@@ -590,22 +590,16 @@ def interpolate(x,
             attr_list.append(v)
         dy_attr = tuple(attr_list)
 
-        eager_args = [x]
-        eager_args.append(inputs['OutSize'] if 'OutSize' in inputs else None)
-        eager_args.append(inputs['SizeTensor'] if 'SizeTensor' in
-                          inputs else None)
-        eager_args.append(inputs['Scale'] if 'Scale' in inputs else None)
-        eager_args.extend([
-            attrs['data_layout'], attrs['out_d'], attrs['out_h'], attrs['out_w']
-        ])
-        eager_args.append(attrs['scale'] if 'scale' in attrs else [])
-        eager_args.extend([
-            attrs['interp_method'], attrs['align_corners'], attrs['align_mode']
-        ])
         if resample_type == "linear":
             if in_dygraph_mode():
-                out = _C_ops.final_state_linear_interp_v2(*eager_args)
+                out = _C_ops.final_state_linear_interp(
+                    x, inputs['OutSize'] if 'OutSize' in inputs else None,
+                    inputs['SizeTensor'] if 'SizeTensor' in inputs else None,
+                    inputs['Scale'] if 'Scale' in inputs else None,
+                    attrs['data_layout'], attrs['out_d'], attrs['out_h'],
+                    attrs['out_w'], attrs['scale'] if 'scale' in attrs else [],
+                    attrs['interp_method'], attrs['align_corners'],
+                    attrs['align_mode'])
             else:
                 out = _C_ops.linear_interp_v2(x, *dy_attr)
         elif resample_type == "bilinear":
...