Unverified commit c737232f, authored by: H HongyuJia, committed by: GitHub

[phi] Transfer linear_interp_v2 yaml to phi (#45072)

* support optional<vector<Tensor>> in yaml and eager

* delete useless comments in eager_gen.py

* fix api_base.py support optional<vector<Tensor>>

* python_c_gen.py support optional<vector<tensor>>

* transfer linear_interp_v2 yaml from fluid to phi

* fix op_test typo error

* change linear_interp_v2 testcase

* fix args in final_state_linear_interp_v2

* fix zeropad2d typo. test=document_fix
Parent 8624f3b1
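As a reading aid (not part of the patch), here is a minimal Python sketch of the user-facing path this commit rewires: in dygraph mode, paddle.nn.functional.interpolate with mode='linear' now dispatches to the generated final_state_linear_interp_v2, whose size_tensor input is the optional Tensor[] that the yaml and eager changes below make representable. The shapes and scale are illustrative only, assuming a build that contains this commit.

import paddle
from paddle.nn.functional import interpolate

# Illustrative 1-D (NCW) linear resize; with this patch, in dygraph mode the
# call routes through the generated final_state_linear_interp_v2 entry point.
x = paddle.rand([1, 3, 100])
out = interpolate(x, scale_factor=0.5, mode='linear',
                  align_corners=False, data_format='NCW')
print(out.shape)  # expected: [1, 3, 50]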
...@@ -386,6 +386,12 @@ CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
"""
CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = \
"""
paddle::optional<std::vector<paddle::experimental::Tensor>> {}_optional;
if( !{}.empty() ) {}_optional = paddle::make_optional<std::vector<paddle::experimental::Tensor>>({});
"""
CHECK_BACKWARD_INPLACE_TEMPLATE = \
"""
bool can_be_inplaced = false;
...@@ -951,11 +957,20 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
)
else:
assert IsVectorTensorType(ttype)
arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}" if is_optional:
amp_tensors_vector_list.append(f"{name}") arg_str = f"const paddle::optional<std::vector<paddle::experimental::Tensor>>& {name}"
amp_autocast_list.append( amp_tensors_vector_optional_list.append(
f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n" f"if ({name}) amp_tensors_vector.push_back( *{name} );\n"
) )
amp_autocast_optional_list.append(
f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
else:
arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
amp_tensors_vector_list.append(f"{name}")
amp_autocast_list.append(
f"auto NEW_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
inputs_args_definition_list[pos] = arg_str
inputs_args_declaration_list[pos] = arg_str
...@@ -1112,7 +1127,7 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
kernel_trans2_op_name_str = f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");"
amp_tensors_vector_list_str = "{ " + ",".join(
amp_tensors_vector_list) + " }"
-amp_tensors_vector_optional_list_str = "".join(
+amp_tensors_vector_optional_list_str = " ".join(
amp_tensors_vector_optional_list)
amp_get_dst_dtype_str = f"auto amp_dst_dtype = egr::GetAmpDestDtype(op_name, amp_tensors_vector);\n"
amp_autocast_list_str = " ".join(
...@@ -1377,7 +1392,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
inplace_check_str = ""
optional_inplace_var_name = []
# Grad Ins from TensorWrappers
-for name, (_, is_fwd_input,
+for name, (backward_input_type, is_fwd_input,
grad_api_position), in backward_forward_inputs_map.items():
tensor_wrapper_name = GetSavedName(name)
transformed_tensor_name = self.TransformToNextGradName(name)
...@@ -1400,9 +1415,14 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
tensor_wrapper_intermidiate_tensor_str)
inplace_grad_input_str = transformed_tensor_name
if is_optional:
if backward_input_type == "std::vector<Tensor>":
tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name)
else:
tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name)
grad_api_args[
grad_api_position] = transformed_tensor_name + "_optional"
...
...@@ -360,9 +360,14 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase):
inplace_args_pos_map[name] = pos
is_optional = (name in optional_inputs)
if IsVectorTensorType(ttype):
if is_optional:
get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
name, "GetOptionalTensorListFromArgs", forward_api_name,
name, pos, "true")
else:
get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
name, "GetTensorListFromArgs", forward_api_name, name,
pos, "false")
else:
if is_optional:
get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format(
...
...@@ -125,4 +125,18 @@ inline paddle::optional<paddle::experimental::Tensor> EagerAmpAutoCast(
return paddle::none;
}
inline paddle::optional<std::vector<paddle::experimental::Tensor>>
EagerAmpAutoCasts(
const std::string& inputs_name,
const paddle::optional<std::vector<paddle::experimental::Tensor>>& inputs,
const paddle::experimental::DataType& dst_dtype,
std::string op_name,
bool trace_backward = true) {
if (inputs) {
return EagerAmpAutoCasts(
inputs_name, *inputs, dst_dtype, op_name, trace_backward);
}
return paddle::optional<std::vector<paddle::experimental::Tensor>>();
}
} // namespace egr
...@@ -970,6 +970,71 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
return result;
}
paddle::optional<std::vector<paddle::experimental::Tensor>>
GetOptionalTensorListFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable) {
PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
if (list == nullptr || list == Py_None) {
if (!dispensable) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensor, but got "
"None",
op_type,
arg_name,
arg_idx));
}
return paddle::none;
}
std::vector<paddle::experimental::Tensor> result;
if (PyList_Check(list)) {
Py_ssize_t len = PyList_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
"empty list",
op_type,
arg_name,
arg_idx));
}
for (Py_ssize_t i = 0; i < len; i++) {
result.emplace_back(
reinterpret_cast<TensorObject*>(PyList_GetItem(list, i))->tensor);
}
} else if (PyTuple_Check(list)) {
Py_ssize_t len = PyTuple_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
"empty list",
op_type,
arg_name,
arg_idx));
}
for (Py_ssize_t i = 0; i < len; i++) {
result.emplace_back(
reinterpret_cast<TensorObject*>(PyTuple_GetItem(list, i))->tensor);
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
"%s",
op_type,
arg_name,
arg_idx,
(reinterpret_cast<PyTypeObject*>(list->ob_type))->tp_name));
}
return result;
}
paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
...
...@@ -255,6 +255,13 @@ paddle::experimental::Tensor& GetTensorFromArgs(const std::string& op_type,
ssize_t arg_idx,
bool dispensable = false);
paddle::optional<std::vector<paddle::experimental::Tensor>>
GetOptionalTensorListFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable = false);
std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
const std::string& op_type,
const std::string& arg_name,
...
...@@ -102,6 +102,18 @@ phi::MetaTensor MakeMetaTensor(
return phi::MetaTensor();
}
std::vector<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors) {
std::vector<phi::MetaTensor> meta_tensors;
if (tensors) {
meta_tensors.reserve(tensors->size());
for (auto* t : tensors.get()) {
meta_tensors.emplace_back(*t);
}
}
return meta_tensors;
}
/* ------------------ for output ----------------------- */
phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
...
...@@ -61,6 +61,9 @@ std::vector<phi::MetaTensor> MakeMetaTensor(
phi::MetaTensor MakeMetaTensor(
const paddle::optional<phi::SelectedRows>& tensor);
std::vector<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<std::vector<const phi::DenseTensor*>>& tensors);
/* ------------------ for output ----------------------- */
phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out);
...
...@@ -495,7 +495,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
{code_indent} {param}_metas[i] = &{param}_meta_vec[i];
{code_indent} }}
"""
param_code = param_code + param + "_metas, "
elif self.inputs['input_info'][
param] == "const paddle::optional<std::vector<Tensor>>&":
meta_tensor_code = meta_tensor_code + f"""
{code_indent} auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param});
{code_indent} paddle::optional<std::vector<const phi::MetaTensor*>> {param}_metas({param}_meta_vec.size());
{code_indent} for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{
{code_indent} {param}_metas->at(i) = &{param}_meta_vec[i];
{code_indent} }}
"""
param_code = param_code + param + "_metas, "
elif param in self.optional_vars:
param_code = param_code + "MakeMetaTensor(" + PREFIX_TENSOR_NAME + param + "), "
...@@ -547,7 +556,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
'const paddle::optional<Tensor>&':
'const paddle::optional<phi::DenseTensor>&',
'const paddle::optional<std::vector<Tensor>>&':
-'paddle::optional<const std::vector<phi::DenseTensor>&>'
+'const paddle::optional<std::vector<const phi::DenseTensor*>>&'
}
dense_out_trans_map = {
'Tensor': 'phi::DenseTensor*',
...@@ -584,9 +593,23 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
'support_trans_dtype']:
trans_flag = "{false, true}"
if input_name in self.optional_vars:
if self.inputs['input_info'][
input_name] == "const paddle::optional<std::vector<Tensor>>&":
input_name_tensor_map[input_name].append(
(f"{PREFIX_TENSOR_NAME}{input_name}_vec", True))
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});
{code_indent} paddle::optional<std::vector<const phi::DenseTensor*>> {PREFIX_TENSOR_NAME}{input_name};
{code_indent} if ({PREFIX_TENSOR_NAME}{input_name}_vec){{
{code_indent} {PREFIX_TENSOR_NAME}{input_name} = paddle::optional<std::vector<const phi::DenseTensor*>>({PREFIX_TENSOR_NAME}{input_name}_vec->size());
{code_indent} for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}_vec->size(); ++i) {{
{code_indent} {PREFIX_TENSOR_NAME}{input_name}->at(i) = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
{code_indent} }}
{code_indent} }}"""
else:
input_name_tensor_map[input_name].append(
(f"{PREFIX_TENSOR_NAME}{input_name}", False))
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""
else:
...@@ -676,7 +699,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
{code_indent} ddims_vec.clear();"""
for input_tensor, is_vector in input_name_tensor_map[input_name]:
if is_vector:
if input_name in self.optional_vars:
input_tensor_code = input_tensor_code + f"""
{code_indent} if ({input_tensor[:-4]}){{
{code_indent} ddims_vec.reserve({input_tensor[:-4]}->size());
{code_indent} for (size_t i = 0; i < {input_tensor[:-4]}->size(); ++i) {{
{code_indent} ddims_vec.emplace_back((*{input_tensor[:-4]}->at(i)).dims());
{code_indent} }}
{code_indent} }}"""
else:
input_tensor_code = input_tensor_code + f"""
{code_indent} ddims_vec.reserve({input_tensor[:-4]}.size());
{code_indent} for (size_t i = 0; i < {input_tensor[:-4]}.size(); ++i) {{
{code_indent} ddims_vec.emplace_back((*{input_tensor[:-4]}[i]).dims());
...
...@@ -1428,6 +1428,17 @@
kernel :
func : less_than
- api : linear_interp_v2
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
output : Tensor(output)
infer_meta :
func : InterpolateInferMeta
optional: out_size, size_tensor, scale_tensor
kernel :
func : linear_interp_v2
data_type : x
backward : linear_interp_v2_grad
- api : linspace
args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
output : Tensor
...
...@@ -1213,6 +1213,18 @@
kernel :
func : lerp_grad
- backward_api : linear_interp_v2_grad
forward : linear_interp_v2 (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode) -> Tensor(output)
args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, Tensor output_grad, str data_layout, int out_d, int out_h, int out_w, float[] scale, str interp_method, bool align_corners, int align_mode)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
optional: out_size, size_tensor, scale_tensor
kernel :
func : linear_interp_v2_grad
data_type : output_grad
- backward_api : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
...
...@@ -153,7 +153,7 @@ def get_numeric_gradient(place,
elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX64:
tensor_to_check_dtype = np.complex64
elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX128:
-tensor_tp_check_dtype = np.complex128
+tensor_to_check_dtype = np.complex128
else:
raise ValueError("Not supported data type " +
str(tensor_to_check_dtype) + ", tensor name : " +
...
...@@ -22,6 +22,34 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.nn.functional import interpolate
from paddle._C_ops import final_state_linear_interp_v2
def linear_interp_v2_test(x,
OutSize=None,
SizeTensor=None,
Scale=None,
data_layout='NCHW',
out_d=-1,
out_h=-1,
out_w=-1,
scale=0.0,
interp_method='linear',
align_corners=False,
align_mode=1):
if isinstance(scale, float) or isinstance(scale, int):
scale_list = []
for _ in range(len(x.shape) - 2):
scale_list.append(scale)
scale = list(map(float, scale_list))
elif isinstance(scale, list) or isinstance(scale, tuple):
scale = list(map(float, scale))
if SizeTensor is not None:
SizeTensor = [SizeTensor]
return final_state_linear_interp_v2(x, OutSize, SizeTensor, Scale,
data_layout, out_d, out_h, out_w, scale,
interp_method, align_corners,
align_mode)
def linear_interp_np(input,
...@@ -79,6 +107,7 @@ def linear_interp_np(input,
class TestLinearInterpOp(OpTest):
def setUp(self):
self.python_api = linear_interp_v2_test
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
...@@ -125,18 +154,18 @@ class TestLinearInterpOp(OpTest):
def test_check_output(self):
if platform.system() == "Linux":
-self.check_output(atol=1e-7)
+self.check_output(atol=1e-7, check_eager=True)
else:
-self.check_output(atol=1e-5)
+self.check_output(atol=1e-5, check_eager=True)
def test_check_grad(self):
-self.check_grad(['X'], 'Out', in_place=True)
+self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
def init_test_case(self):
self.interp_method = 'linear'
self.input_shape = [1, 3, 100]
self.out_w = 50
-self.scale = 0.
+self.scale = 0.5
self.out_size = np.array([
50,
]).astype("int32")
...@@ -148,9 +177,9 @@ class TestLinearInterpOpDataLayout(TestLinearInterpOp):
def init_test_case(self):
self.interp_method = 'linear'
-self.input_shape = [1, 3, 100]
+self.input_shape = [1, 100, 3]
self.out_w = 50
-self.scale = 0.
+self.scale = 0.5
self.out_size = np.array([
50,
]).astype("int32")
...@@ -165,7 +194,7 @@ class TestLinearInterpOpAlignMode(TestLinearInterpOp):
self.interp_method = 'linear'
self.input_shape = [1, 3, 100]
self.out_w = 50
-self.scale = 0.
+self.scale = 0.5
self.out_size = np.array([
50,
]).astype("int32")
...@@ -179,7 +208,7 @@ class TestLinearInterpOpScale(TestLinearInterpOp):
self.interp_method = 'linear'
self.input_shape = [1, 3, 100]
self.out_w = 50
-self.scale = 0.5
+self.scale = 0.8
self.out_size = np.array([
50,
]).astype("int32")
...@@ -190,6 +219,7 @@ class TestLinearInterpOpScale(TestLinearInterpOp):
class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
def setUp(self):
self.python_api = linear_interp_v2_test
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
...
...@@ -590,8 +590,24 @@ def interpolate(x,
attr_list.append(v)
dy_attr = tuple(attr_list)
eager_args = [x]
eager_args.append(inputs['OutSize'] if 'OutSize' in inputs else None)
eager_args.append(inputs['SizeTensor'] if 'SizeTensor' in
inputs else None)
eager_args.append(inputs['Scale'] if 'Scale' in inputs else None)
eager_args.extend([
attrs['data_layout'], attrs['out_d'], attrs['out_h'], attrs['out_w']
])
eager_args.append(attrs['scale'] if 'scale' in attrs else [])
eager_args.extend([
attrs['interp_method'], attrs['align_corners'], attrs['align_mode']
])
if resample_type == "linear": if resample_type == "linear":
out = _C_ops.linear_interp_v2(x, *dy_attr) if in_dygraph_mode():
out = _C_ops.final_state_linear_interp_v2(*eager_args)
else:
out = _C_ops.linear_interp_v2(x, *dy_attr)
elif resample_type == "bilinear": elif resample_type == "bilinear":
out = _C_ops.bilinear_interp_v2(x, *dy_attr) out = _C_ops.bilinear_interp_v2(x, *dy_attr)
elif resample_type == "trilinear": elif resample_type == "trilinear":
...@@ -1557,7 +1573,7 @@ def zeropad2d(x, padding, data_format="NCHW", name=None): ...@@ -1557,7 +1573,7 @@ def zeropad2d(x, padding, data_format="NCHW", name=None):
name(str, optional): The default value is None. Normally there is no need for user name(str, optional): The default value is None. Normally there is no need for user
to set this property. to set this property.
-Returns
+Returns:
Tensor, padded with 0 according to pad and data type is same as input.
Examples:
...