diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index bc48fe75149a9db701865f953fa4978ab254056a..0420bb2dbcbec22d3d740a057857de0a148d045c 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -386,6 +386,12 @@ CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
   if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
 """
 
+CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = \
+"""
+  paddle::optional<std::vector<paddle::experimental::Tensor>> {}_optional;
+  if( !{}.empty() ) {}_optional = paddle::make_optional<std::vector<paddle::experimental::Tensor>>({});
+"""
+
 CHECK_BACKWARD_INPLACE_TEMPLATE = \
 """
   bool can_be_inplaced = false;
@@ -951,11 +957,20 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
                     )
             else:
                 assert IsVectorTensorType(ttype)
-                arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
-                amp_tensors_vector_list.append(f"{name}")
-                amp_autocast_list.append(
-                    f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
-                )
+                if is_optional:
+                    arg_str = f"const paddle::optional<std::vector<paddle::experimental::Tensor>>& {name}"
+                    amp_tensors_vector_optional_list.append(
+                        f"if ({name}) amp_tensors_vector.push_back( *{name} );\n"
+                    )
+                    amp_autocast_optional_list.append(
+                        f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
+                    )
+                else:
+                    arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
+                    amp_tensors_vector_list.append(f"{name}")
+                    amp_autocast_list.append(
+                        f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
+                    )
 
             inputs_args_definition_list[pos] = arg_str
             inputs_args_declaration_list[pos] = arg_str
@@ -1112,7 +1127,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
         kernel_trans2_op_name_str = f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");"
         amp_tensors_vector_list_str = "{ " + ",".join(
             amp_tensors_vector_list) + " }"
-        amp_tensors_vector_optional_list_str = "".join(
+        amp_tensors_vector_optional_list_str = " ".join(
            amp_tensors_vector_optional_list)
         amp_get_dst_dtype_str = f"auto amp_dst_dtype = egr::GetAmpDestDtype(op_name, amp_tensors_vector);\n"
         amp_autocast_list_str = " ".join(
@@ -1377,7 +1392,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         inplace_check_str = ""
         optional_inplace_var_name = []
         # Grad Ins from TensorWrappers
-        for name, (_, is_fwd_input,
+        for name, (backward_input_type, is_fwd_input,
                    grad_api_position), in backward_forward_inputs_map.items():
             tensor_wrapper_name = GetSavedName(name)
             transformed_tensor_name = self.TransformToNextGradName(name)
@@ -1400,9 +1415,14 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
                     tensor_wrapper_intermidiate_tensor_str)
 
             inplace_grad_input_str = transformed_tensor_name
             if is_optional:
-                tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
-                    transformed_tensor_name, transformed_tensor_name,
-                    transformed_tensor_name, transformed_tensor_name)
+                if backward_input_type == "std::vector<Tensor>":
+                    tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format(
+                        transformed_tensor_name, transformed_tensor_name,
+                        transformed_tensor_name, transformed_tensor_name)
+                else:
+                    tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
+                        transformed_tensor_name, transformed_tensor_name,
+                        transformed_tensor_name, transformed_tensor_name)
                 grad_api_args[
                     grad_api_position] = transformed_tensor_name + "_optional"
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
index 8fde6951e03ece1af92d5dff33abced2ec292fe7..7c2c377d8e69cd7148e2d541e7b2e2ca91f4ca49 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -360,9 +360,14 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase):
                 inplace_args_pos_map[name] = pos
             is_optional = (name in optional_inputs)
             if IsVectorTensorType(ttype):
-                get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
-                    name, "GetTensorListFromArgs", forward_api_name, name, pos,
-                    "false")
+                if is_optional:
+                    get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
+                        name, "GetOptionalTensorListFromArgs", forward_api_name,
+                        name, pos, "true")
+                else:
+                    get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
+                        name, "GetTensorListFromArgs", forward_api_name, name,
+                        pos, "false")
             else:
                 if is_optional:
                     get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
diff --git a/paddle/fluid/eager/eager_amp_auto_cast.h b/paddle/fluid/eager/eager_amp_auto_cast.h
index f98f25635f703118c180e1e6e446d955a698470a..d1813ae3de1ddbfbd7310ed218857f715bfe0c4e 100644
--- a/paddle/fluid/eager/eager_amp_auto_cast.h
+++ b/paddle/fluid/eager/eager_amp_auto_cast.h
@@ -125,4 +125,18 @@ inline paddle::optional<paddle::experimental::Tensor> EagerAmpAutoCast(
   return paddle::none;
 }
 
+inline paddle::optional<std::vector<paddle::experimental::Tensor>>
+EagerAmpAutoCasts(
+    const std::string& inputs_name,
+    const paddle::optional<std::vector<paddle::experimental::Tensor>>& inputs,
+    const paddle::experimental::DataType& dst_dtype,
+    std::string op_name,
+    bool trace_backward = true) {
+  if (inputs) {
+    return EagerAmpAutoCasts(
+        inputs_name, *inputs, dst_dtype, op_name, trace_backward);
+  }
+  return paddle::optional<std::vector<paddle::experimental::Tensor>>();
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 6c1dea40b7814de72c7bced4fc781eb73b137f64..147154ae549f3eb64cf3e347aab7bee554cc4ffa 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -970,6 +970,71 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
   return result;
 }
 
+paddle::optional<std::vector<paddle::experimental::Tensor>>
+GetOptionalTensorListFromArgs(const std::string& op_type,
+                              const std::string& arg_name,
+                              PyObject* args,
+                              ssize_t arg_idx,
+                              bool dispensable) {
+  PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
+
+  if (list == nullptr || list == Py_None) {
+    if (!dispensable) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensor, but got "
+          "None",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    return paddle::none;
+  }
+
+  std::vector<paddle::experimental::Tensor> result;
+
+  if (PyList_Check(list)) {
+    Py_ssize_t len = PyList_Size(list);
+    result.reserve(static_cast<size_t>(len));
+    if (len == 0) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+          "empty list",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    for (Py_ssize_t i = 0; i < len; i++) {
+      result.emplace_back(
+          reinterpret_cast<TensorObject*>(PyList_GetItem(list, i))->tensor);
+    }
+  } else if (PyTuple_Check(list)) {
+    Py_ssize_t len = PyTuple_Size(list);
+    result.reserve(static_cast<size_t>(len));
+    if (len == 0) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+          "empty list",
+          op_type,
+          arg_name,
+          arg_idx));
+    }
+    for (Py_ssize_t i = 0; i < len; i++) {
+      result.emplace_back(
+          reinterpret_cast<TensorObject*>(PyTuple_GetItem(list, i))->tensor);
+    }
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument '%s' (position %d) must be list of Tensors, but got "
+        "%s",
+        op_type,
+        arg_name,
+        arg_idx,
+        (reinterpret_cast<PyTypeObject*>(list->ob_type))->tp_name));
+  }
+
+  return result;
+}
+
 paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
                                                    const std::string& arg_name,
                                                    PyObject* args,
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index df959b9abf4f15aaa676824debc5a71696adb3c2..1878752f4284e8f3cb74b91af0b1804e33936fca 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -255,6 +255,13 @@ paddle::experimental::Tensor& GetTensorFromArgs(const std::string& op_type,
                                                 ssize_t arg_idx,
                                                 bool dispensable = false);
 
+paddle::optional<std::vector<paddle::experimental::Tensor>>
+GetOptionalTensorListFromArgs(const std::string& op_type,
+                              const std::string& arg_name,
+                              PyObject* args,
+                              ssize_t arg_idx,
+                              bool dispensable = false);
+
 std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
     const std::string& op_type,
     const std::string& arg_name,
diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc
index 14d7fcdee634d66f1544d5501a4dec3617334686..992f462a44ebe6bd66e3f405a6e8935e63f56ecb 100644
--- a/paddle/phi/api/lib/api_gen_utils.cc
+++ b/paddle/phi/api/lib/api_gen_utils.cc
@@ -102,6 +102,18 @@ phi::MetaTensor MakeMetaTensor(
   return phi::MetaTensor();
 }
 
+std::vector<phi::MetaTensor> MakeMetaTensor(
+    const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors) {
+  std::vector<phi::MetaTensor> meta_tensors;
+  if (tensors) {
+    meta_tensors.reserve(tensors->size());
+    for (auto* t : tensors.get()) {
+      meta_tensors.emplace_back(*t);
+    }
+  }
+  return meta_tensors;
+}
+
 /* ------------------ for output ----------------------- */
 
 phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
diff --git a/paddle/phi/api/lib/api_gen_utils.h b/paddle/phi/api/lib/api_gen_utils.h
index ff71e5bfebf289b218f83515e1bb7a0949422d52..e990eb0279b0fc047fc0e4410533b5f2b0df8805 100644
--- a/paddle/phi/api/lib/api_gen_utils.h
+++ b/paddle/phi/api/lib/api_gen_utils.h
@@ -61,6 +61,9 @@ std::vector<phi::MetaTensor> MakeMetaTensor(
 phi::MetaTensor MakeMetaTensor(
     const paddle::optional<phi::DenseTensor>& tensor);
 
+std::vector<phi::MetaTensor> MakeMetaTensor(
+    const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors);
+
 /* ------------------ for output ----------------------- */
 
 phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out);
diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py
index 7e32bcf3e5c40c000f17b2916a14d55681ebbcc1..a1d38ca22cb61c71021fc3c62e64211ecd0c9284 100644
--- a/paddle/phi/api/yaml/generator/api_base.py
+++ b/paddle/phi/api/yaml/generator/api_base.py
@@ -495,7 +495,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
 {code_indent}    {param}_metas[i] = &{param}_meta_vec[i];
 {code_indent}  }}
 """
-
+                param_code = param_code + param + "_metas, "
+            elif self.inputs['input_info'][
+                    param] == "const paddle::optional<std::vector<Tensor>>&":
+                meta_tensor_code = meta_tensor_code + f"""
+{code_indent}  auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param});
+{code_indent}  paddle::optional<std::vector<const phi::MetaTensor*>> {param}_metas({param}_meta_vec.size());
+{code_indent}  for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{
+{code_indent}    {param}_metas->at(i) = &{param}_meta_vec[i];
+{code_indent}  }}
+"""
                 param_code = param_code + param + "_metas, "
             elif param in self.optional_vars:
                 param_code = param_code + "MakeMetaTensor(" + PREFIX_TENSOR_NAME + param + "), "
@@ -547,7 +556,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
             'const paddle::optional<Tensor>&':
             'const paddle::optional<phi::DenseTensor>&',
             'const paddle::optional<std::vector<Tensor>>&':
-            'paddle::optional<const std::vector<phi::DenseTensor>&>'
+            'const paddle::optional<std::vector<const phi::DenseTensor*>>&'
         }
         dense_out_trans_map = {
             'Tensor': 'phi::DenseTensor*',
@@ -584,9 +593,23 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
                     'support_trans_dtype']:
                 trans_flag = "{false, true}"
             if input_name in self.optional_vars:
-                input_name_tensor_map[input_name].append(
-                    (f"{PREFIX_TENSOR_NAME}{input_name}", False))
-                input_tensor_code = input_tensor_code + f"""
+                if self.inputs['input_info'][
+                        input_name] == "const paddle::optional<std::vector<Tensor>>&":
+                    input_name_tensor_map[input_name].append(
+                        (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True))
+                    input_tensor_code = input_tensor_code + f"""
+{code_indent}  auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});
+{code_indent}  paddle::optional<std::vector<const phi::DenseTensor*>> {PREFIX_TENSOR_NAME}{input_name};
+{code_indent}  if ({PREFIX_TENSOR_NAME}{input_name}_vec){{
+{code_indent}    {PREFIX_TENSOR_NAME}{input_name} = paddle::optional<std::vector<const phi::DenseTensor*>>({PREFIX_TENSOR_NAME}{input_name}_vec->size());
+{code_indent}    for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}_vec->size(); ++i) {{
+{code_indent}      {PREFIX_TENSOR_NAME}{input_name}->at(i) = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
+{code_indent}    }}
+{code_indent}  }}"""
+                else:
+                    input_name_tensor_map[input_name].append(
+                        (f"{PREFIX_TENSOR_NAME}{input_name}", False))
+                    input_tensor_code = input_tensor_code + f"""
 {code_indent}  auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""
 
             else:
@@ -676,7 +699,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
 {code_indent}     ddims_vec.clear();"""
             for input_tensor, is_vector in input_name_tensor_map[input_name]:
                 if is_vector:
-                    input_tensor_code = input_tensor_code + f"""
+                    if input_name in self.optional_vars:
+                        input_tensor_code = input_tensor_code + f"""
+{code_indent}     if ({input_tensor[:-4]}){{
+{code_indent}       ddims_vec.reserve({input_tensor[:-4]}->size());
+{code_indent}       for (size_t i = 0; i < {input_tensor[:-4]}->size(); ++i) {{
+{code_indent}         ddims_vec.emplace_back((*{input_tensor[:-4]}->at(i)).dims());
+{code_indent}       }}
+{code_indent}     }}"""
+                    else:
+                        input_tensor_code = input_tensor_code + f"""
 {code_indent}     ddims_vec.reserve({input_tensor[:-4]}.size());
 {code_indent}     for (size_t i = 0; i < {input_tensor[:-4]}.size(); ++i) {{
 {code_indent}       ddims_vec.emplace_back((*{input_tensor[:-4]}[i]).dims());
diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml
index 88fca869d06e60fa6a02c83275f007f9f414c070..ebbe497e8d5055c64dffff02978e8af59719c26d 100755
--- a/paddle/phi/api/yaml/legacy_api.yaml
+++ b/paddle/phi/api/yaml/legacy_api.yaml
@@ -1428,6 +1428,17 @@
   kernel :
     func : less_than
 
+- api : linear_interp_v2
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp_v2
+    data_type : x
+  backward : linear_interp_v2_grad
+
 - api : linspace
   args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
   output : Tensor
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 12e2d4ab908b11400d1765c847949cf1acb6a063..6d5f30e567093cae6b094a73d6b419e897c3195b 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -1213,6 +1213,18 @@
   kernel :
     func : lerp_grad
 
+- backward_api : linear_interp_v2_grad
+  forward : linear_interp_v2 (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp_v2_grad
+    data_type : output_grad
+
 - backward_api : log10_grad
   forward : log10 (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index dad503660eafe692ece435bb8133a349e14cc541..1bebbe154653dfe76265b0265ea2f2f84e5dfe63 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -153,7 +153,7 @@ def get_numeric_gradient(place,
     elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX64:
         tensor_to_check_dtype = np.complex64
     elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX128:
-        tensor_tp_check_dtype = np.complex128
+        tensor_to_check_dtype = np.complex128
     else:
         raise ValueError("Not supported data type " +
                          str(tensor_to_check_dtype) + ", tensor name : " +
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index 69d652299bef3d5ef451e84827558f5258d5b6c6..2e3f6a47cba9468ce25617793e10a8f63e0d9f70 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -22,6 +22,34 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.nn.functional import interpolate
+from paddle._C_ops import final_state_linear_interp_v2
+
+
+def linear_interp_v2_test(x,
+                          OutSize=None,
+                          SizeTensor=None,
+                          Scale=None,
+                          data_layout='NCHW',
+                          out_d=-1,
+                          out_h=-1,
+                          out_w=-1,
+                          scale=0.0,
+                          interp_method='linear',
+                          align_corners=False,
+                          align_mode=1):
+    if isinstance(scale, float) or isinstance(scale, int):
+        scale_list = []
+        for _ in range(len(x.shape) - 2):
+            scale_list.append(scale)
+        scale = list(map(float, scale_list))
+    elif isinstance(scale, list) or isinstance(scale, tuple):
+        scale = list(map(float, scale))
+    if SizeTensor is not None:
+        SizeTensor = [SizeTensor]
+    return final_state_linear_interp_v2(x, OutSize, SizeTensor, Scale,
+                                        data_layout, out_d, out_h, out_w, scale,
+                                        interp_method, align_corners,
+                                        align_mode)
 
 
 def linear_interp_np(input,
@@ -79,6 +107,7 @@ def linear_interp_np(input,
 class TestLinearInterpOp(OpTest):
 
     def setUp(self):
+        self.python_api = linear_interp_v2_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
@@ -125,18 +154,18 @@ class TestLinearInterpOp(OpTest):
 
     def test_check_output(self):
         if platform.system() == "Linux":
-            self.check_output(atol=1e-7)
+            self.check_output(atol=1e-7, check_eager=True)
         else:
-            self.check_output(atol=1e-5)
+            self.check_output(atol=1e-5, check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', in_place=True)
+        self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
 
     def init_test_case(self):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -148,9 +177,9 @@ class TestLinearInterpOpDataLayout(TestLinearInterpOp):
 
     def init_test_case(self):
         self.interp_method = 'linear'
-        self.input_shape = [1, 3, 100]
+        self.input_shape = [1, 100, 3]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -165,7 +194,7 @@ class TestLinearInterpOpAlignMode(TestLinearInterpOp):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.
+        self.scale = 0.5
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -179,7 +208,7 @@ class TestLinearInterpOpScale(TestLinearInterpOp):
         self.interp_method = 'linear'
         self.input_shape = [1, 3, 100]
         self.out_w = 50
-        self.scale = 0.5
+        self.scale = 0.8
         self.out_size = np.array([
             50,
         ]).astype("int32")
@@ -190,6 +219,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
 class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
 
     def setUp(self):
+        self.python_api = linear_interp_v2_test
         self.out_size = None
         self.actual_shape = None
         self.data_layout = 'NCHW'
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index 7f381a884689a56e72c63b104fd31330f823e074..f9b9c89c2b0b88b0483cff32030e128ccc4e0f78 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -590,8 +590,24 @@ def interpolate(x,
                 attr_list.append(v)
         dy_attr = tuple(attr_list)
 
+        eager_args = [x]
+        eager_args.append(inputs['OutSize'] if 'OutSize' in inputs else None)
+        eager_args.append(inputs['SizeTensor'] if 'SizeTensor' in
+                          inputs else None)
+        eager_args.append(inputs['Scale'] if 'Scale' in inputs else None)
+        eager_args.extend([
+            attrs['data_layout'], attrs['out_d'], attrs['out_h'], attrs['out_w']
+        ])
+        eager_args.append(attrs['scale'] if 'scale' in attrs else [])
+        eager_args.extend([
+            attrs['interp_method'], attrs['align_corners'], attrs['align_mode']
+        ])
+
         if resample_type == "linear":
-            out = _C_ops.linear_interp_v2(x, *dy_attr)
+            if in_dygraph_mode():
+                out = _C_ops.final_state_linear_interp_v2(*eager_args)
+            else:
+                out = _C_ops.linear_interp_v2(x, *dy_attr)
         elif resample_type == "bilinear":
             out = _C_ops.bilinear_interp_v2(x, *dy_attr)
         elif resample_type == "trilinear":
@@ -1557,7 +1573,7 @@ def zeropad2d(x, padding, data_format="NCHW", name=None):
         name(str, optional): The default value is None. Normally there is no need for user
                              to set this property.
 
-    Returns: 
+    Returns:
         Tensor, padded with 0 according to pad and data type is same as input.
 
     Examples:
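For reference, a minimal dygraph usage sketch of the code path this patch enables. It is not part of the patch itself; it assumes a Paddle build that contains these changes, in which interpolate() with mode='linear' dispatches to the new final_state_linear_interp_v2 API and leaves the out_size/size_tensor/scale_tensor inputs unset, exercising the optional Tensor[] argument handling added above. The input shape and target size below are illustrative.

import numpy as np
import paddle
import paddle.nn.functional as F

# 3-D NCW input; only `size` is given, so the optional out_size / size_tensor /
# scale_tensor inputs of linear_interp_v2 stay empty on the C++ side.
x = paddle.to_tensor(np.random.rand(1, 3, 100).astype("float32"))
out = F.interpolate(x, size=[50], mode="linear",
                    align_corners=False, data_format="NCW")
print(out.shape)  # [1, 3, 50]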