diff --git a/paddle/fluid/operators/generator/templates/operator_utils.c.j2 b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
index eac7abee64fef103c461141515c4c48170d3c71c..2fb14e7187a92705cb5b2391879826233da5ce64 100644
--- a/paddle/fluid/operators/generator/templates/operator_utils.c.j2
+++ b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
@@ -624,6 +624,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
   void Apply() override {
     //get inputs
 {{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}}
+{{construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name)}}
     //get attr
 {{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
     //get output
@@ -691,6 +692,28 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endfor %}
 {%- endmacro %}
 
+{% macro construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name) %}
+  {% set attrs_length = attrs | length %}
+  {% for i in range(attrs_length) %}
+    {% if "tensor_name" in attr_dict[attrs[i]] %}
+  auto {{'tensor_' + attrs[i]}} = this->GetOptionalSingleForwardInput("{{attr_dict[attrs[i]]['tensor_name']}}");
+  if ({{'tensor_' + attrs[i]}}) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "We don't support dynamic tensor attribute {{attr_dict[attrs[i]]['tensor_name']}} for {{op_name}} composite "
+        "for now. "));
+  }
+    {%- endif %}
+    {% if "tensors_name" in attr_dict[attrs[i]] %}
+  auto {{'tensors_' + attrs[i]}} = this->GetOptionalMultiForwardInput("{{attr_dict[attrs[i]]['tensors_name']}}");
+  if ({{'tensors_' + attrs[i]}}) {
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "We don't support dynamic tensors attribute {{attr_dict[attrs[i]]['tensors_name']}} for {{op_name}} composite "
+        "for now. "));
+  }
+    {%- endif %}
+  {%- endfor %}
+{%- endmacro %}
+
 {% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
   {% set attrs_length = attrs | length %}
   {% for i in range(attrs_length) %}
diff --git a/paddle/fluid/prim/tests/test_static_prim.cc b/paddle/fluid/prim/tests/test_static_prim.cc
index 76ce33860c57b5f1b9745f72671c5ec4f6e09e8b..d687781df2069980a85322877d9be48a757d4eb8 100644
--- a/paddle/fluid/prim/tests/test_static_prim.cc
+++ b/paddle/fluid/prim/tests/test_static_prim.cc
@@ -282,8 +282,11 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
       grad_sub_block);
   test();
   std::vector<paddle::Tensor> muti_fw_input = test.GetMultiForwardInput("X");
-  std::vector<paddle::optional<paddle::Tensor>> opt_muti_fw_input =
+  paddle::optional<std::vector<paddle::Tensor>> opt_muti_fw_input =
       test.GetOptionalMultiForwardInput("X");
+  std::vector<paddle::Tensor> opt_inner = opt_muti_fw_input.is_initialized()
+                                              ? opt_muti_fw_input.get()
+                                              : std::vector<paddle::Tensor>{};
   paddle::Tensor fw_out = test.GetSingleForwardOutput("Out");
   paddle::Tensor* fw_out_ptr = test.GetOutputPtr(&fw_out);
   std::string fw_out_name = test.GetOutputName(fw_out);
@@ -295,14 +298,10 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
   ASSERT_EQ(
       static_cast<prim::DescTensor*>(muti_fw_input[1].impl().get())->Name(),
       "x1");
-  ASSERT_EQ(opt_muti_fw_input.size(), static_cast<std::size_t>(2));
-  ASSERT_EQ(static_cast<prim::DescTensor*>(
-                opt_muti_fw_input[0].get_ptr()->impl().get())
-                ->Name(),
+  ASSERT_EQ(opt_inner.size(), static_cast<std::size_t>(2));
+  ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[0].impl().get())->Name(),
             "x0");
-  ASSERT_EQ(static_cast<prim::DescTensor*>(
-                opt_muti_fw_input[1].get_ptr()->impl().get())
-                ->Name(),
+  ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[1].impl().get())->Name(),
             "x1");
   ASSERT_EQ(&fw_out, fw_out_ptr);
   ASSERT_EQ(fw_out_name, "out");
diff --git a/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h b/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
index 03edd06b71f23a80a693d7f51869f69dee40733c..3e06e2b041836df84901e9eb15daec730220a24f 100644
--- a/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
+++ b/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
@@ -202,58 +202,71 @@ class CompositeGradOpMakerBase {
     return inputs_grads;
   }
 
-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardOutput(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardOutput(
       const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> outputs_opt;
+    paddle::optional<std::vector<paddle::Tensor>> outputs_opt;
     std::vector<framework::VarDesc*> outputs_descs =
         this->MultiForwardOutput(name);
-    outputs_opt.reserve(outputs_descs.size());
+    if (outputs_descs.empty()) {
+      return outputs_opt;
+    }
+    std::vector<paddle::Tensor> outputs;
+    outputs.reserve(outputs_descs.size());
     for (const auto& output_desc : outputs_descs) {
       if (output_desc) {
-        outputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(std::make_shared<prim::DescTensor>(output_desc))));
+        outputs.emplace_back(
+            paddle::Tensor(std::make_shared<prim::DescTensor>(output_desc)));
       } else {
-        outputs_opt.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        outputs.emplace_back(paddle::Tensor());
       }
     }
+    outputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(outputs);
     return outputs_opt;
   }
 
-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardInput(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardInput(
       const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> inputs_opt;
+    paddle::optional<std::vector<paddle::Tensor>> inputs_opt;
     std::vector<framework::VarDesc*> inputs_descs =
         this->MultiForwardInput(name);
-    inputs_opt.reserve(inputs_descs.size());
+    if (inputs_descs.empty()) {
+      return inputs_opt;
+    }
+    std::vector<paddle::Tensor> inputs;
+    inputs.reserve(inputs_descs.size());
     for (const auto& input_desc : inputs_descs) {
       if (input_desc) {
-        inputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(std::make_shared<prim::DescTensor>(input_desc))));
+        inputs.emplace_back(
+            paddle::Tensor(std::make_shared<prim::DescTensor>(input_desc)));
       } else {
-        inputs_opt.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        inputs.emplace_back(paddle::Tensor());
      }
    }
+    inputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(inputs);
     return inputs_opt;
   }
 
-  std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiOutputGrad(
+  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiOutputGrad(
      const std::string& name) {
-    std::vector<paddle::optional<paddle::Tensor>> outputs_grads;
+    paddle::optional<std::vector<paddle::Tensor>> outputs_grads_opt;
     std::vector<framework::VarDesc*> outputs_grads_descs =
         this->MultiOutputGrad(name);
+    if (outputs_grads_descs.empty()) {
+      return outputs_grads_opt;
+    }
+    std::vector<paddle::Tensor> outputs_grads;
     outputs_grads.reserve(outputs_grads_descs.size());
     for (const auto& output_grad_desc : outputs_grads_descs) {
       if (output_grad_desc) {
-        outputs_grads.emplace_back(paddle::make_optional<paddle::Tensor>(
-            paddle::Tensor(
-                std::make_shared<prim::DescTensor>(output_grad_desc))));
+        outputs_grads.emplace_back(paddle::Tensor(
+            std::make_shared<prim::DescTensor>(output_grad_desc)));
       } else {
-        outputs_grads.emplace_back(
-            paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+        outputs_grads.emplace_back(paddle::Tensor());
       }
     }
-    return outputs_grads;
+    outputs_grads_opt =
+        paddle::make_optional<std::vector<paddle::Tensor>>(outputs_grads);
+    return outputs_grads_opt;
   }
 
   paddle::Tensor* GetOutputPtr(paddle::Tensor* input) {
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 3a134aa6c619f91f78fe8b185519e618a133bbab..c94b17b9c6d7c1e1fca2e8b109a0e2590fb2f55f 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -973,7 +973,10 @@ class OpTest(unittest.TestCase):
                 % self.op_type
             )
             args = OpTestUtils.prepare_python_api_arguments(
-                self.python_api, eager_tensor_inputs, attrs_outputs, kernel_sig
+                self.python_api,
+                eager_tensor_inputs,
+                attrs_outputs,
+                kernel_sig,
             )
             """ we directly return the cal_python_api value because the value is already tensor.
             """
diff --git a/python/paddle/fluid/tests/unittests/prim_op_test.py b/python/paddle/fluid/tests/unittests/prim_op_test.py
index c6fbe23c8c4bea3ecd5de06f33e4a4c9ff18e399..f3f780b05f9bfd2b8ae7730e25222fd7ef6bbc4a 100644
--- a/python/paddle/fluid/tests/unittests/prim_op_test.py
+++ b/python/paddle/fluid/tests/unittests/prim_op_test.py
@@ -78,7 +78,11 @@ class OpTestUtils:
     @classmethod
     def prepare_python_api_arguments(
-        cls, api, op_proto_ins, op_proto_attrs, kernel_sig
+        cls,
+        api,
+        op_proto_ins,
+        op_proto_attrs,
+        kernel_sig,
     ):
         """map from `op proto inputs and attrs` to `api input list and api attrs dict`
@@ -100,7 +104,7 @@ class OpTestUtils:
         def to_defaults_list(params, defaults):
             return [defaults[p] for p in params if p in defaults]
 
-        def parse_attri_value(name, op_inputs, op_attrs):
+        def parse_attri_value(name, op_inputs, op_proto_attrs):
             """parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty
             1. if the name in op_attrs, use the op_attrs[name]
             2. if the name in op_inputs, convert the op_inputs to [type of default value]
@@ -155,6 +159,10 @@ class OpTestUtils:
             for name in attrs_sig
         ]
         results = []
+        # hack: support variable-length parameters (such as paddle.meshgrid(*args, **kwargs))
+        if api_params == []:
+            results.append(input_arguments)
+            return results
         api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])
         idx_of_op_proto_arguments = 0
         for idx, arg_name in enumerate(api_params):
@@ -178,6 +186,7 @@ class OpTestUtils:
     def assumption_assert_and_transform(cls, args, inp_num):
         """
         transform inputs by the following rules:
+            Note: a list holding a single Tensor may be indistinguishable from a bare Tensor; use a wrapper to tell them apart.
             1. [Tensor] -> Tensor
             2. [Tensor, Tensor, ...] -> list of Tensors
             3. None -> None
@@ -376,7 +385,7 @@ class PrimForwardChecker:
     def check(self):
         if (
-            self.place is paddle.fluid.libpaddle.CUDAPlace
+            type(self.place) is paddle.fluid.libpaddle.CUDAPlace
             and not paddle.is_compiled_with_cuda()
         ):
             return
@@ -422,7 +431,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -566,7 +578,10 @@ class PrimForwardChecker:
             stop_gradient=True
         )
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, static_inputs, attrs, self.kernel_sig
+            self.python_api,
+            static_inputs,
+            attrs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -634,7 +649,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -713,7 +731,10 @@ class PrimForwardChecker:
             _,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=True)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, _ = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -786,7 +807,7 @@ class PrimGradChecker(PrimForwardChecker):
     def check(self):
         if (
-            self.place is paddle.fluid.libpaddle.CUDAPlace
+            type(self.place) is paddle.fluid.libpaddle.CUDAPlace
             and not paddle.is_compiled_with_cuda()
         ):
             return
@@ -873,7 +894,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         if hasattr(self.op_test, "python_out_sig"):
@@ -979,7 +1003,10 @@ class PrimGradChecker(PrimForwardChecker):
             stop_gradient=False
         )
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, static_inputs, attrs, self.kernel_sig
+            self.python_api,
+            static_inputs,
+            attrs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         if hasattr(self.op_test, "python_out_sig"):
@@ -1082,7 +1109,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
@@ -1194,7 +1224,10 @@ class PrimGradChecker(PrimForwardChecker):
             inputs_dict,
         ) = self.get_eager_input_attr_and_inputdict(stop_gradient=False)
         args = OpTestUtils.prepare_python_api_arguments(
-            self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+            self.python_api,
+            eager_tensor_inputs,
+            attrs_outputs,
+            self.kernel_sig,
         )
         inputs_sig, _, outputs_sig = self.kernel_sig
         args = OpTestUtils.assumption_assert_and_transform(
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index b6b63b14c90fae62a7a1bfc33cdf153c23780c51..e9a21ac6b097ce87e5ce6ccf64a5e254e0c99cf6 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -43,7 +43,7 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]
 
     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()
 
     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index fdf25267d17ed5b33f1f512b44e8f242629eb652..b8df4b77de4689defb6b6df306a3b5835275b8cd 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -401,32 +401,37 @@ class TestResizeLinearOpUint8(OpTest):
 class TestLinearInterpOpError(unittest.TestCase):
     def test_error(self):
-        with program_guard(Program(), Program()):
-
-            def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
-                out1 = paddle.nn.Upsample(
-                    size=[256], data_format='NCW', mode='linear'
-                )
-                out1_res = out1(x1)
-
-            def data_format_error():
-                x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
-                out2 = paddle.nn.Upsample(
-                    size=[256], data_format='NHWCD', mode='linear'
-                )
-                out2_res = out2(x2)
-
-            def out_shape_error():
-                x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
-                out3 = paddle.nn.Upsample(
-                    size=[256, 256], data_format='NHWC', mode='linear'
-                )
-                out3_res = out3(x3)
-
-            self.assertRaises(ValueError, input_shape_error)
-            self.assertRaises(ValueError, data_format_error)
-            self.assertRaises(ValueError, out_shape_error)
+        with paddle.fluid.framework._static_guard():
+            with program_guard(Program(), Program()):
+
+                def input_shape_error():
+                    x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                    out1 = paddle.nn.Upsample(
+                        size=[256], data_format='NCW', mode='linear'
+                    )
+                    out1_res = out1(x1)
+
+                def data_format_error():
+                    x2 = fluid.data(
+                        name="x2", shape=[1, 3, 128], dtype="float32"
+                    )
+                    out2 = paddle.nn.Upsample(
+                        size=[256], data_format='NHWCD', mode='linear'
+                    )
+                    out2_res = out2(x2)
+
+                def out_shape_error():
+                    x3 = fluid.data(
+                        name="x3", shape=[1, 3, 128], dtype="float32"
+                    )
+                    out3 = paddle.nn.Upsample(
+                        size=[256, 256], data_format='NHWC', mode='linear'
+                    )
+                    out3_res = out3(x3)
+
+                self.assertRaises(ValueError, input_shape_error)
+                self.assertRaises(ValueError, data_format_error)
+                self.assertRaises(ValueError, out_shape_error)
 
 
 @unittest.skipIf(