Unverified · Commit 07d8770f authored by Charles-hit, committed by GitHub

[static code gen] add error msg in composite maker code gen (#51211)

* support variable parameter in optest

* add error msg for using tensor attrs in static code gen

* fix static code gen

* fix prim op test

* modify comment

* fix op test

* fix ci

* remove code
Parent 0d096b3b
@@ -624,6 +624,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase {
   void Apply() override {
     //get inputs
 {{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}}
+{{construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name)}}
     //get attr
 {{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
     //get output
@@ -691,6 +692,28 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase {
 {%- endfor %}
 {%- endmacro %}

+{% macro construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name) %}
+{% set attrs_length = attrs | length %}
+{% for i in range(attrs_length) %}
+{% if "tensor_name" in attr_dict[attrs[i]] %}
+  auto {{'tensor_' + attrs[i]}} = this->GetOptionalSingleForwardInput("{{attr_dict[attrs[i]]['tensor_name']}}");
+  if ({{'tensor_' + attrs[i]}}) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "We don't support dynamic tensor attribute {{attr_dict[attrs[i]]['tensor_name']}} for {{op_name}} composite "
+        "for now."));
+  }
+{%- endif %}
+{% if "tensors_name" in attr_dict[attrs[i]] %}
+  auto {{'tensors_' + attrs[i]}} = this->GetOptionalMultiForwardInput("{{attr_dict[attrs[i]]['tensors_name']}}");
+  if ({{'tensors_' + attrs[i]}}) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "We don't support dynamic tensors attribute {{attr_dict[attrs[i]]['tensors_name']}} for {{op_name}} composite "
+        "for now."));
+  }
+{%- endif %}
+{%- endfor %}
+{%- endmacro %}
+
 {% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
 {% set attrs_length = attrs | length %}
 {% for i in range(attrs_length) %}
...
@@ -282,8 +282,11 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
       grad_sub_block);
   test();
   std::vector<paddle::Tensor> muti_fw_input = test.GetMultiForwardInput("X");
-  std::vector<paddle::optional<paddle::Tensor>> opt_muti_fw_input =
+  paddle::optional<std::vector<paddle::Tensor>> opt_muti_fw_input =
       test.GetOptionalMultiForwardInput("X");
+  std::vector<paddle::Tensor> opt_inner = opt_muti_fw_input.is_initialized()
+                                              ? opt_muti_fw_input.get()
+                                              : std::vector<paddle::Tensor>{};
   paddle::Tensor fw_out = test.GetSingleForwardOutput("Out");
   paddle::Tensor* fw_out_ptr = test.GetOutputPtr(&fw_out);
   std::string fw_out_name = test.GetOutputName(fw_out);
@@ -295,14 +298,10 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
   ASSERT_EQ(
       static_cast<prim::DescTensor*>(muti_fw_input[1].impl().get())->Name(),
       "x1");
-  ASSERT_EQ(opt_muti_fw_input.size(), static_cast<std::size_t>(2));
-  ASSERT_EQ(static_cast<prim::DescTensor*>(
-                opt_muti_fw_input[0].get_ptr()->impl().get())
-                ->Name(),
-            "x0");
-  ASSERT_EQ(static_cast<prim::DescTensor*>(
-                opt_muti_fw_input[1].get_ptr()->impl().get())
-                ->Name(),
-            "x1");
+  ASSERT_EQ(opt_inner.size(), static_cast<std::size_t>(2));
+  ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[0].impl().get())->Name(),
+            "x0");
+  ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[1].impl().get())->Name(),
+            "x1");
   ASSERT_EQ(&fw_out, fw_out_ptr);
   ASSERT_EQ(fw_out_name, "out");
...
@@ -202,58 +202,71 @@ class CompositeGradOpMakerBase {
     return inputs_grads;
   }

-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardOutput(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardOutput(
       const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> outputs_opt;
+    paddle::optional<std::vector<paddle::Tensor>> outputs_opt;
     std::vector<framework::VarDesc*> outputs_descs =
         this->MultiForwardOutput(name);
-    outputs_opt.reserve(outputs_descs.size());
+    if (outputs_descs.empty()) {
+      return outputs_opt;
+    }
+    std::vector<paddle::Tensor> outputs;
+    outputs.reserve(outputs_descs.size());
     for (const auto& output_desc : outputs_descs) {
       if (output_desc) {
-        outputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(std::make_shared<DescTensor>(output_desc))));
+        outputs.emplace_back(
+            paddle::Tensor(std::make_shared<DescTensor>(output_desc)));
       } else {
-        outputs_opt.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        outputs.emplace_back(paddle::Tensor());
       }
     }
+    outputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(outputs);
     return outputs_opt;
   }

-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardInput(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardInput(
       const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> inputs_opt;
+    paddle::optional<std::vector<paddle::Tensor>> inputs_opt;
     std::vector<framework::VarDesc*> inputs_descs =
         this->MultiForwardInput(name);
-    inputs_opt.reserve(inputs_descs.size());
+    if (inputs_descs.empty()) {
+      return inputs_opt;
+    }
+    std::vector<paddle::Tensor> inputs;
+    inputs.reserve(inputs_descs.size());
     for (const auto& input_desc : inputs_descs) {
       if (input_desc) {
-        inputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(std::make_shared<DescTensor>(input_desc))));
+        inputs.emplace_back(
+            paddle::Tensor(std::make_shared<DescTensor>(input_desc)));
       } else {
-        inputs_opt.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        inputs.emplace_back(paddle::Tensor());
       }
     }
+    inputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(inputs);
     return inputs_opt;
   }

-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiOutputGrad(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiOutputGrad(
       const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> outputs_grads;
+    paddle::optional<std::vector<paddle::Tensor>> outputs_grads_opt;
     std::vector<framework::VarDesc*> outputs_grads_descs =
         this->MultiOutputGrad(name);
+    if (outputs_grads_descs.empty()) {
+      return outputs_grads_opt;
+    }
+    std::vector<paddle::Tensor> outputs_grads;
     outputs_grads.reserve(outputs_grads_descs.size());
     for (const auto& output_grad_desc : outputs_grads_descs) {
       if (output_grad_desc) {
-        outputs_grads.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc))));
+        outputs_grads.emplace_back(
+            paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc)));
       } else {
-        outputs_grads.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        outputs_grads.emplace_back(paddle::Tensor());
       }
     }
-    return outputs_grads;
+    outputs_grads_opt =
+        paddle::make_optional<std::vector<paddle::Tensor>>(outputs_grads);
+    return outputs_grads_opt;
   }

   paddle::Tensor* GetOutputPtr(paddle::Tensor* input) {
...
@@ -973,7 +973,10 @@ class OpTest(unittest.TestCase):
                 % self.op_type
             )
             args = OpTestUtils.prepare_python_api_arguments(
-                self.python_api, eager_tensor_inputs, attrs_outputs, kernel_sig
+                self.python_api,
+                eager_tensor_inputs,
+                attrs_outputs,
+                kernel_sig,
             )
             """ we directly return the cal_python_api value because the value is already tensor.
             """
...
@@ -78,7 +78,11 @@ class OpTestUtils:
     @classmethod
     def prepare_python_api_arguments(
-        cls, api, op_proto_ins, op_proto_attrs, kernel_sig
+        cls,
+        api,
+        op_proto_ins,
+        op_proto_attrs,
+        kernel_sig,
     ):
         """map from `op proto inputs and attrs` to `api input list and api attrs dict`
@@ -100,7 +104,7 @@ class OpTestUtils:
         def to_defaults_list(params, defaults):
             return [defaults[p] for p in params if p in defaults]

-        def parse_attri_value(name, op_inputs, op_attrs):
+        def parse_attri_value(name, op_inputs, op_proto_attrs):
             """parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty
             1. if the name in op_attrs, use the op_attrs[name]
             2. if the name in op_inputs, convert the op_inputs to [type of default value]
@@ -155,6 +159,10 @@ class OpTestUtils:
             for name in attrs_sig
         ]
         results = []
+        # hack: support variable-length parameters (e.g. paddle.meshgrid(*args, **kwargs))
+        if api_params == []:
+            results.append(input_arguments)
+            return results
         api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])
         idx_of_op_proto_arguments = 0
         for idx, arg_name in enumerate(api_params):
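The early return above handles variadic APIs: when the Python signature exposes only *args/**kwargs, there are no named parameters to map the op proto inputs onto, so the packed input list is forwarded unchanged. A minimal sketch of why the parameter list comes out empty in that case, assuming api_params is built by scanning the signature's named parameters; meshgrid_like is a hypothetical stand-in, not the real paddle.meshgrid:

import inspect

def meshgrid_like(*args, **kwargs):
    # Hypothetical stand-in for a variadic API such as paddle.meshgrid.
    return args

# Variadic parameters are not POSITIONAL_OR_KEYWORD, so a scan over named
# parameters finds nothing to map the proto inputs onto.
named_params = [
    p.name
    for p in inspect.signature(meshgrid_like).parameters.values()
    if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
assert named_params == []  # mirrors the `api_params == []` branch above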
@@ -178,6 +186,7 @@ class OpTestUtils:
     def assumption_assert_and_transform(cls, args, inp_num):
         """
         transform inputs by the following rules:
+        Note: a list holding a single Tensor cannot be distinguished from a plain Tensor here; wrap it if the distinction matters.
         1. [Tensor] -> Tensor
         2. [Tensor, Tensor, ...] -> list of Tensors
         3. None -> None
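For illustration, a minimal sketch of rules 1-3, under the simplifying assumption that the first inp_num arguments are the tensor inputs; transform_inputs_sketch is a hypothetical helper, not the real OpTest utility:

def transform_inputs_sketch(args, inp_num):
    out = []
    for i, arg in enumerate(args):
        if i < inp_num and isinstance(arg, list) and len(arg) == 1:
            out.append(arg[0])  # rule 1: [Tensor] -> Tensor
        else:
            out.append(arg)  # rules 2 and 3: lists and None pass through
    return out

print(transform_inputs_sketch([['t0'], ['a', 'b'], None], inp_num=2))
# ['t0', ['a', 'b'], None]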
@@ -376,7 +385,7 @@ class PrimForwardChecker:
     def check(self):
         if (
-            self.place is paddle.fluid.libpaddle.CUDAPlace
+            type(self.place) is paddle.fluid.libpaddle.CUDAPlace
             and not paddle.is_compiled_with_cuda()
         ):
             return
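This fix matters because self.place is a place instance while CUDAPlace is a class, so the old identity test was always False and the guard never skipped CUDA-only checks on non-CUDA builds. A self-contained illustration with a stand-in class (not the real paddle binding):

class CUDAPlace:
    # Stand-in for paddle.fluid.libpaddle.CUDAPlace.
    pass

place = CUDAPlace()
assert (place is CUDAPlace) is False  # instance vs. class: never identical
assert type(place) is CUDAPlace  # what the corrected guard checks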
@@ -422,7 +431,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -566,7 +578,10 @@ class PrimForwardChecker:
             stop_gradient=True
         )
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, static_inputs, attrs, self.kernel_sig
+            self.python_api,
+            static_inputs,
+            attrs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -634,7 +649,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -713,7 +731,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -786,7 +807,7 @@ class PrimGradChecker(PrimForwardChecker):
     def check(self):
         if (
-            self.place is paddle.fluid.libpaddle.CUDAPlace
+            type(self.place) is paddle.fluid.libpaddle.CUDAPlace
             and not paddle.is_compiled_with_cuda()
         ):
             return
@@ -873,7 +894,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         if hasattr(self.op_test, "python_out_sig"):
@@ -979,7 +1003,10 @@ class PrimGradChecker(PrimForwardChecker):
             stop_gradient=False
         )
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, static_inputs, attrs, self.kernel_sig
+            self.python_api,
+            static_inputs,
+            attrs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         if hasattr(self.op_test, "python_out_sig"):
@@ -1082,7 +1109,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -1194,7 +1224,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
...
@@ -43,7 +43,7 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
...
@@ -401,32 +401,37 @@ class TestResizeLinearOpUint8(OpTest):
 class TestLinearInterpOpError(unittest.TestCase):
     def test_error(self):
-        with program_guard(Program(), Program()):
+        with paddle.fluid.framework._static_guard():
+            with program_guard(Program(), Program()):

-            def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
-                out1 = paddle.nn.Upsample(
-                    size=[256], data_format='NCW', mode='linear'
-                )
-                out1_res = out1(x1)
+                def input_shape_error():
+                    x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                    out1 = paddle.nn.Upsample(
+                        size=[256], data_format='NCW', mode='linear'
+                    )
+                    out1_res = out1(x1)

-            def data_format_error():
-                x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
-                out2 = paddle.nn.Upsample(
-                    size=[256], data_format='NHWCD', mode='linear'
-                )
-                out2_res = out2(x2)
+                def data_format_error():
+                    x2 = fluid.data(
+                        name="x2", shape=[1, 3, 128], dtype="float32"
+                    )
+                    out2 = paddle.nn.Upsample(
+                        size=[256], data_format='NHWCD', mode='linear'
+                    )
+                    out2_res = out2(x2)

-            def out_shape_error():
-                x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
-                out3 = paddle.nn.Upsample(
-                    size=[256, 256], data_format='NHWC', mode='linear'
-                )
-                out3_res = out3(x3)
+                def out_shape_error():
+                    x3 = fluid.data(
+                        name="x3", shape=[1, 3, 128], dtype="float32"
+                    )
+                    out3 = paddle.nn.Upsample(
+                        size=[256, 256], data_format='NHWC', mode='linear'
+                    )
+                    out3_res = out3(x3)

-            self.assertRaises(ValueError, input_shape_error)
-            self.assertRaises(ValueError, data_format_error)
-            self.assertRaises(ValueError, out_shape_error)
+                self.assertRaises(ValueError, input_shape_error)
+                self.assertRaises(ValueError, data_format_error)
+                self.assertRaises(ValueError, out_shape_error)

 @unittest.skipIf(
...
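paddle.fluid.framework._static_guard is an internal test helper; below is a minimal sketch of the pattern it provides, assuming it merely toggles static-graph mode around the block and restores dynamic mode afterwards (the real helper may preserve state differently):

import contextlib

import paddle

@contextlib.contextmanager
def static_guard():
    # Run the enclosed block in static-graph mode, then restore
    # dynamic mode if that is where we started.
    was_dynamic = paddle.in_dynamic_mode()
    paddle.enable_static()
    try:
        yield
    finally:
        if was_dynamic:
            paddle.disable_static()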