From c737232f08653ce16d17fdaab9226aa51641b9c2 Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Fri, 12 Aug 2022 20:04:13 +0800
Subject: [PATCH] [phi] Transfer linear_interp_v2 yaml to phi (#45072)

* support optional<vector<Tensor>> in yaml and eager

* delete useless comments in eager_gen.py

* fix api_base.py support optional<vector<Tensor>>

* python_c_gen.py support optional<vector<Tensor>>

* transfer linear_interp_v2 yaml from fluid to phi

* fix op_test typo error

* change linear_interp_v2 testcase

* fix args in final_state_linear_interp_v2

* fix zeropad2d typo. test=document_fix
---
 .../final_state_generator/eager_gen.py        | 40 +++++++++---
 .../final_state_generator/python_c_gen.py     | 11 +++-
 paddle/fluid/eager/eager_amp_auto_cast.h      | 14 ++++
 paddle/fluid/pybind/eager_utils.cc            | 65 +++++++++++++++++++
 paddle/fluid/pybind/eager_utils.h             |  7 ++
 paddle/phi/api/lib/api_gen_utils.cc           | 12 ++++
 paddle/phi/api/lib/api_gen_utils.h            |  3 +
 paddle/phi/api/yaml/generator/api_base.py     | 44 +++++++++++--
 paddle/phi/api/yaml/legacy_api.yaml           | 11 ++++
 paddle/phi/api/yaml/legacy_backward.yaml      | 12 ++++
 .../paddle/fluid/tests/unittests/op_test.py   |  2 +-
 .../unittests/test_linear_interp_v2_op.py     | 46 ++++++++++---
 python/paddle/nn/functional/common.py         | 20 +++++-
 13 files changed, 257 insertions(+), 30 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index bc48fe75149..0420bb2dbcb 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -386,6 +386,12 @@ CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
 """
   paddle::optional<paddle::experimental::Tensor> {}_optional;
   if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
 """
 
+CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = \
+"""
+  paddle::optional<std::vector<paddle::experimental::Tensor>> {}_optional;
+  if( !{}.empty() ) {}_optional = paddle::make_optional<std::vector<paddle::experimental::Tensor>>({});
+"""
+
 CHECK_BACKWARD_INPLACE_TEMPLATE = \
 """
   bool can_be_inplaced = false;
@@ -951,11 +957,20 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
                 )
             else:
                 assert IsVectorTensorType(ttype)
-                arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
-                amp_tensors_vector_list.append(f"{name}")
-                amp_autocast_list.append(
-                    f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
-                )
+                if is_optional:
+                    arg_str = f"const paddle::optional<std::vector<paddle::experimental::Tensor>>& {name}"
+                    amp_tensors_vector_optional_list.append(
+                        f"if ({name}) amp_tensors_vector.push_back( *{name} );\n"
+                    )
+                    amp_autocast_optional_list.append(
+                        f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
+                    )
+                else:
+                    arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
+                    amp_tensors_vector_list.append(f"{name}")
+                    amp_autocast_list.append(
+                        f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
+                    )
 
             inputs_args_definition_list[pos] = arg_str
             inputs_args_declaration_list[pos] = arg_str
@@ -1112,7 +1127,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         kernel_trans2_op_name_str = f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");"
         amp_tensors_vector_list_str = "{ " + ",".join(
             amp_tensors_vector_list) + " }"
-        amp_tensors_vector_optional_list_str = "".join(
+        amp_tensors_vector_optional_list_str = " ".join(
            amp_tensors_vector_optional_list)
         amp_get_dst_dtype_str = f"auto amp_dst_dtype = egr::GetAmpDestDtype(op_name, amp_tensors_vector);\n"
         amp_autocast_list_str = " ".join(
@@ -1377,7 +1392,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         inplace_check_str = ""
         optional_inplace_var_name = []
         # Grad Ins from TensorWrappers
-        for name, (_, is_fwd_input,
+        for name, (backward_input_type, is_fwd_input,
                    grad_api_position), in backward_forward_inputs_map.items():
             tensor_wrapper_name = GetSavedName(name)
             transformed_tensor_name = self.TransformToNextGradName(name)
@@ -1400,9 +1415,14 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
                     tensor_wrapper_intermidiate_tensor_str)
             inplace_grad_input_str = transformed_tensor_name
             if is_optional:
-                tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
-                    transformed_tensor_name, transformed_tensor_name,
-                    transformed_tensor_name, transformed_tensor_name)
+                if backward_input_type == "std::vector<Tensor>":
+                    tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format(
+                        transformed_tensor_name, transformed_tensor_name,
+                        transformed_tensor_name, transformed_tensor_name)
+                else:
+                    tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
+                        transformed_tensor_name, transformed_tensor_name,
+                        transformed_tensor_name, transformed_tensor_name)
 
                 grad_api_args[
                     grad_api_position] = transformed_tensor_name + "_optional"
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
index 8fde6951e03..7c2c377d8e6 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -360,9 +360,14 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase):
                 inplace_args_pos_map[name] = pos
             is_optional = (name in optional_inputs)
             if IsVectorTensorType(ttype):
-                get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
-                    name, "GetTensorListFromArgs", forward_api_name, name, pos,
-                    "false")
+                if is_optional:
+                    get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
+                        name, "GetOptionalTensorListFromArgs", forward_api_name,
+                        name, pos, "true")
+                else:
+                    get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
+                        name, "GetTensorListFromArgs", forward_api_name, name,
+                        pos, "false")
             else:
                 if is_optional:
                     get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
diff --git a/paddle/fluid/eager/eager_amp_auto_cast.h b/paddle/fluid/eager/eager_amp_auto_cast.h
index f98f25635f7..d1813ae3de1 100644
--- a/paddle/fluid/eager/eager_amp_auto_cast.h
+++ b/paddle/fluid/eager/eager_amp_auto_cast.h
@@ -125,4 +125,18 @@ inline paddle::optional<paddle::experimental::Tensor> EagerAmpAutoCast(
   return paddle::none;
 }
 
+inline paddle::optional<std::vector<paddle::experimental::Tensor>>
+EagerAmpAutoCasts(
+    const std::string& inputs_name,
+    const paddle::optional<std::vector<paddle::experimental::Tensor>>& inputs,
+    const paddle::experimental::DataType& dst_dtype,
+    std::string op_name,
+    bool trace_backward = true) {
+  if (inputs) {
+    return EagerAmpAutoCasts(
+        inputs_name, *inputs, dst_dtype, op_name, trace_backward);
+  }
+  return paddle::optional<std::vector<paddle::experimental::Tensor>>();
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 6c1dea40b78..147154ae549 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -970,6 +970,71 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
   return result;
 }
 
+paddle::optional<std::vector<paddle::experimental::Tensor>>
+GetOptionalTensorListFromArgs(const std::string& op_type,
+                              const std::string& arg_name,
+                              PyObject* args,
+                              ssize_t arg_idx,
+                              bool dispensable) {
+  PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
+
+  if (list == nullptr || list == Py_None) {
+    if (!dispensable) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensor, but got "
+          "None",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    return paddle::none;
+  }
+
+  std::vector<paddle::experimental::Tensor> result;
+
+  if (PyList_Check(list)) {
+    Py_ssize_t len = PyList_Size(list);
+    result.reserve(static_cast<size_t>(len));
+    if (len == 0) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+          "empty list",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    for (Py_ssize_t i = 0; i < len; i++) {
+      result.emplace_back(
+          reinterpret_cast<TensorObject*>(PyList_GetItem(list, i))->tensor);
+    }
+  } else if (PyTuple_Check(list)) {
+    Py_ssize_t len = PyTuple_Size(list);
+    result.reserve(static_cast<size_t>(len));
+    if (len == 0) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+          "empty list",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    for (Py_ssize_t i = 0; i < len; i++) {
+      result.emplace_back(
+          reinterpret_cast<TensorObject*>(PyTuple_GetItem(list, i))->tensor);
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+        "%s",
+        op_type,
+        arg_name,
+        arg_idx,
+        (reinterpret_cast<PyTypeObject*>(list->ob_type))->tp_name));
+  }
+
+  return result;
+}
+
 paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
                                                    const std::string& arg_name,
                                                    PyObject* args,
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index df959b9abf4..1878752f428 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -255,6 +255,13 @@ paddle::experimental::Tensor& GetTensorFromArgs(const std::string& op_type,
                                                 ssize_t arg_idx,
                                                 bool dispensable = false);
 
+paddle::optional<std::vector<paddle::experimental::Tensor>>
+GetOptionalTensorListFromArgs(const std::string& op_type,
+                              const std::string& arg_name,
+                              PyObject* args,
+                              ssize_t arg_idx,
+                              bool dispensable = false);
+
 std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
     const std::string& op_type,
     const std::string& arg_name,
diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc
index 14d7fcdee63..992f462a44e 100644
--- a/paddle/phi/api/lib/api_gen_utils.cc
+++ b/paddle/phi/api/lib/api_gen_utils.cc
@@ -102,6 +102,18 @@ phi::MetaTensor MakeMetaTensor(
   return phi::MetaTensor();
 }
 
+std::vector<phi::MetaTensor> MakeMetaTensor(
+    const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors) {
+  std::vector<phi::MetaTensor> meta_tensors;
+  if (tensors) {
+    meta_tensors.reserve(tensors->size());
+    for (auto* t : tensors.get()) {
+      meta_tensors.emplace_back(*t);
+    }
+  }
+  return meta_tensors;
+}
+
 /* ------------------ for output ----------------------- */
 
 phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
diff --git a/paddle/phi/api/lib/api_gen_utils.h b/paddle/phi/api/lib/api_gen_utils.h
index ff71e5bfebf..e990eb0279b 100644
--- a/paddle/phi/api/lib/api_gen_utils.h
+++ b/paddle/phi/api/lib/api_gen_utils.h
@@ -61,6 +61,9 @@ std::vector<phi::MetaTensor> MakeMetaTensor(
 phi::MetaTensor MakeMetaTensor(
     const paddle::optional<phi::DenseTensor>& tensor);
 
+std::vector<phi::MetaTensor> MakeMetaTensor(
+    const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors);
+
 /* ------------------ for output ----------------------- */
 
 phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out);
diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py
index 7e32bcf3e5c..a1d38ca22cb 100644
--- a/paddle/phi/api/yaml/generator/api_base.py
+++ b/paddle/phi/api/yaml/generator/api_base.py
@@ -495,7 +495,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
 {code_indent}    {param}_metas[i] = &{param}_meta_vec[i];
 {code_indent}  }}
 """
-
+                param_code = param_code + param + "_metas, "
+            elif self.inputs['input_info'][
+                    param] == "const paddle::optional<std::vector<Tensor>>&":
+                meta_tensor_code = meta_tensor_code + f"""
+{code_indent}  auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param});
+{code_indent}  paddle::optional<std::vector<const phi::MetaTensor*>> {param}_metas({param}_meta_vec.size());
+{code_indent}  for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{
+{code_indent}    {param}_metas->at(i) = &{param}_meta_vec[i];
+{code_indent}  }}
+"""
                 param_code = param_code + param + "_metas, "
             elif param in self.optional_vars:
                 param_code = param_code + "MakeMetaTensor(" + PREFIX_TENSOR_NAME + param + "), "
@@ -547,7 +556,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
             'const paddle::optional<Tensor>&':
             'const paddle::optional<phi::DenseTensor>&',
             'const paddle::optional<std::vector<Tensor>>&':
-            'paddle::optional<const std::vector<phi::DenseTensor>&>'
+            'const paddle::optional<std::vector<const phi::DenseTensor*>>&'
         }
         dense_out_trans_map = {
             'Tensor': 'phi::DenseTensor*',
@@ -584,9 +593,23 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                     'support_trans_dtype']:
                 trans_flag = "{false, true}"
             if input_name in self.optional_vars:
-                input_name_tensor_map[input_name].append(
-                    (f"{PREFIX_TENSOR_NAME}{input_name}", False))
-                input_tensor_code = input_tensor_code + f"""
+                if self.inputs['input_info'][
+                        input_name] == "const paddle::optional<std::vector<Tensor>>&":
+                    input_name_tensor_map[input_name].append(
+                        (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True))
+                    input_tensor_code = input_tensor_code + f"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});
+{code_indent}  paddle::optional<std::vector<const phi::DenseTensor*>> {PREFIX_TENSOR_NAME}{input_name};
+{code_indent}  if ({PREFIX_TENSOR_NAME}{input_name}_vec){{
+{code_indent}    {PREFIX_TENSOR_NAME}{input_name} = paddle::optional<std::vector<const phi::DenseTensor*>>({PREFIX_TENSOR_NAME}{input_name}_vec->size());
+{code_indent}    for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}_vec->size(); ++i) {{
+{code_indent}      {PREFIX_TENSOR_NAME}{input_name}->at(i) = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
+{code_indent}    }}
+{code_indent}  }}"""
+                else:
+                    input_name_tensor_map[input_name].append(
+                        (f"{PREFIX_TENSOR_NAME}{input_name}", False))
+                    input_tensor_code = input_tensor_code + f"""
 {code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""
 
             else:
@@ -676,7 +699,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
 {code_indent}     ddims_vec.clear();"""
             for input_tensor, is_vector in input_name_tensor_map[input_name]:
                 if is_vector:
-                    input_tensor_code = input_tensor_code + f"""
+                    if input_name in self.optional_vars:
+                        input_tensor_code = input_tensor_code + f"""
+{code_indent}   if ({input_tensor[:-4]}){{
+{code_indent}     ddims_vec.reserve({input_tensor[:-4]}->size());
+{code_indent}     for (size_t i = 0; i < {input_tensor[:-4]}->size(); ++i) {{
+{code_indent}       ddims_vec.emplace_back((*{input_tensor[:-4]}->at(i)).dims());
+{code_indent}     }}
+{code_indent}   }}"""
+                    else:
+                        input_tensor_code = input_tensor_code + f"""
 {code_indent}   ddims_vec.reserve({input_tensor[:-4]}.size());
 {code_indent}   for (size_t i = 0; i < {input_tensor[:-4]}.size(); ++i) {{
 {code_indent}     ddims_vec.emplace_back((*{input_tensor[:-4]}[i]).dims());
diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml
index 88fca869d06..ebbe497e8d5 100755
--- a/paddle/phi/api/yaml/legacy_api.yaml
+++ b/paddle/phi/api/yaml/legacy_api.yaml
@@ -1428,6 +1428,17 @@
   kernel :
     func : less_than
 
+- api : linear_interp_v2
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp_v2
+    data_type : x
+  backward : linear_interp_v2_grad
+
 - api : linspace
   args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
   output : Tensor
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 12e2d4ab908..6d5f30e5670 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -1213,6 +1213,18 @@
   kernel :
     func : lerp_grad
 
+- backward_api : linear_interp_v2_grad
+  forward : linear_interp_v2 (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp_v2_grad
+    data_type : output_grad
+
 - backward_api : log10_grad
   forward : log10 (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index dad503660ea..1bebbe15465 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -153,7 +153,7 @@ def get_numeric_gradient(place,
     elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX64:
         tensor_to_check_dtype = np.complex64
     elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX128:
-        tensor_tp_check_dtype = np.complex128
+        tensor_to_check_dtype = np.complex128
     else:
         raise ValueError("Not supported data type " +
                          str(tensor_to_check_dtype) + ", tensor name : " +
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index 69d652299be..2e3f6a47cba 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -22,6 +22,34 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.nn.functional import interpolate
+from paddle._C_ops import final_state_linear_interp_v2
+
+
+def linear_interp_v2_test(x,
+                          OutSize=None,
+                          SizeTensor=None,
+                          Scale=None,
+                          data_layout='NCHW',
+                          out_d=-1,
+                          out_h=-1,
+                          out_w=-1,
+                          scale=0.0,
+                          interp_method='linear',
+                          align_corners=False,
+                          align_mode=1):
+    if isinstance(scale, float) or isinstance(scale, int):
+        scale_list = []
+        for _ in range(len(x.shape) - 2):
+            scale_list.append(scale)
+        scale = list(map(float, scale_list))
+    elif isinstance(scale, list) or isinstance(scale, tuple):
+        scale = list(map(float, scale))
+    if SizeTensor is not None:
+        SizeTensor = [SizeTensor]
+    return final_state_linear_interp_v2(x, OutSize, SizeTensor, Scale,
+                                        data_layout, out_d, out_h, out_w,
+                                        scale, interp_method, align_corners,
+                                        align_mode)
 
 
 def linear_interp_np(input,
@@ -79,6 +107,7 @@ def linear_interp_np(input,
 class TestLinearInterpOp(OpTest):
 
     def setUp(self):
+        self.python_api = linear_interp_v2_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
@@ -125,18 +154,18 @@ class TestLinearInterpOp(OpTest):
 
     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7)
+            self.check_output(atol=1e-7, check_eager=True)
         else:
-            self.check_output(atol=1e-5)
+            self.check_output(atol=1e-5, check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
 
     def init_test_case(self):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -148,9 +177,9 @@ class TestLinearInterpOpDataLayout(TestLinearInterpOp):
 
     def init_test_case(self):
         self.interp_method = 'linear'
-        self.input_shape = [1, 3, 100]
+        self.input_shape = [1, 100, 3]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -165,7 +194,7 @@ class TestLinearInterpOpAlignMode(TestLinearInterpOp):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -179,7 +208,7 @@ class TestLinearInterpOpScale(TestLinearInterpOp):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.5
+        self.scale = 0.8
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -190,6 +219,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
 
     def setUp(self):
+        self.python_api = linear_interp_v2_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index 7f381a88468..f9b9c89c2b0 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -590,8 +590,24 @@ def interpolate(x,
             attr_list.append(v)
         dy_attr = tuple(attr_list)
 
+        eager_args = [x]
+        eager_args.append(inputs['OutSize'] if 'OutSize' in inputs else None)
+        eager_args.append(inputs['SizeTensor'] if 'SizeTensor' in
+                          inputs else None)
+        eager_args.append(inputs['Scale'] if 'Scale' in inputs else None)
+        eager_args.extend([
+            attrs['data_layout'], attrs['out_d'], attrs['out_h'], attrs['out_w']
+        ])
+        eager_args.append(attrs['scale'] if 'scale' in attrs else [])
+        eager_args.extend([
+            attrs['interp_method'], attrs['align_corners'], attrs['align_mode']
+        ])
+
         if resample_type == "linear":
-            out = _C_ops.linear_interp_v2(x, *dy_attr)
+            if in_dygraph_mode():
+                out = _C_ops.final_state_linear_interp_v2(*eager_args)
+            else:
+                out = _C_ops.linear_interp_v2(x, *dy_attr)
         elif resample_type == "bilinear":
             out = _C_ops.bilinear_interp_v2(x, *dy_attr)
         elif resample_type == "trilinear":
@@ -1557,7 +1573,7 @@ def zeropad2d(x, padding, data_format="NCHW", name=None):
         name(str, optional): The default value is None. Normally there is no
             need for user to set this property.
 
-    Returns: 
+    Returns:
         Tensor, padded with 0 according to pad and data type is same as input.
 
     Examples:
-- 
GitLab
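
Editor's note, not part of the patch: a quick way to exercise the new code path. In dygraph mode, `paddle.nn.functional.interpolate` with `mode='linear'` now dispatches to `final_state_linear_interp_v2`; leaving the size/scale tensor inputs unset means the optional `size_tensor` list reaches the phi kernel as an empty `paddle::optional<std::vector<Tensor>>`, which is exactly the plumbing this commit adds. A minimal sketch, assuming a develop build containing this patch with the eager final-state dygraph mode enabled (the default on develop at this point); the updated unit tests check the same route via `check_eager=True`:

    import paddle
    import paddle.nn.functional as F

    # 1-D linear interpolation over a [batch, channels, width] input.
    # No out_size/size_tensor/scale_tensor is passed, so the optional
    # tensor-list arguments arrive at the kernel as empty optionals.
    x = paddle.rand([1, 3, 100], dtype='float32')
    out = F.interpolate(x, size=[50], mode='linear',
                        align_corners=False, data_format='NCW')
    print(out.shape)  # [1, 3, 50]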