diff --git a/paddle/phi/api/lib/api_gen_utils.cc b/paddle/phi/api/lib/api_gen_utils.cc index 9d72a23cb5485e43209c7af53c9be6b3f2c11783..14d7fcdee634d66f1544d5501a4dec3617334686 100644 --- a/paddle/phi/api/lib/api_gen_utils.cc +++ b/paddle/phi/api/lib/api_gen_utils.cc @@ -31,14 +31,14 @@ paddle::optional<phi::DenseTensor> TensorToDenseTensor( return nullptr; } -std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor( +std::unique_ptr<std::vector<const phi::DenseTensor*>> TensorToDenseTensor( const std::vector<Tensor>& tensors) { - auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor>>(); + auto pt_tensors = std::make_unique<std::vector<const phi::DenseTensor*>>(); pt_tensors->reserve(tensors.size()); for (const auto& t : tensors) { pt_tensors->push_back( - *std::dynamic_pointer_cast<phi::DenseTensor>(t.impl())); + std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()).get()); } return pt_tensors; diff --git a/paddle/phi/api/lib/api_gen_utils.h b/paddle/phi/api/lib/api_gen_utils.h index fe934481bcdc22d9486ac2b651b846078f84f59b..ff71e5bfebf289b218f83515e1bb7a0949422d52 100644 --- a/paddle/phi/api/lib/api_gen_utils.h +++ b/paddle/phi/api/lib/api_gen_utils.h @@ -35,7 +35,7 @@ std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor); paddle::optional<phi::DenseTensor> TensorToDenseTensor( const paddle::optional<Tensor>& tensor); -std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor( +std::unique_ptr<std::vector<const phi::DenseTensor*>> TensorToDenseTensor( const std::vector<Tensor>& tensors); std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor); diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py index 833eadcf9d110008eeb87bfb474ad0420ccdb129..f93046d26279fa3094fee9621cbe87b00b3b7d07 100644 --- a/paddle/phi/api/yaml/generator/api_base.py +++ b/paddle/phi/api/yaml/generator/api_base.py @@ -582,18 +582,18 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d trans_flag = "{false, true}" if input_name in self.optional_vars: input_tensor_code = input_tensor_code + f""" -{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});""" +{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = 
PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});""" else: if self.inputs['input_info'][ input_name] == "const Tensor&": input_tensor_code = input_tensor_code + f""" -{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});""" +{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});""" elif self.inputs['input_info'][ input_name] == "const std::vector<Tensor>&": input_tensor_code = input_tensor_code + f""" -{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag}); +{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag}); {code_indent} std::vector<const phi::DenseTensor*> {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size()); {code_indent} for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{ {code_indent} {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i); @@ -612,7 +612,13 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d {code_indent} paddle::optional<phi::TensorBase> {PREFIX_TENSOR_NAME}{input_name} = {input_name} ? 
paddle::optional<phi::TensorBase>(*{input_name}->impl()) : paddle::none;""" else: - input_tensor_code = input_tensor_code + f""" + if self.inputs['input_info'][ + input_name] == "const std::vector<Tensor>&": + input_tensor_code = input_tensor_code + f""" +{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_uq_ptr = TensorToDenseTensor({input_name}); +{code_indent} const auto& {PREFIX_TENSOR_NAME}{input_name} = *{PREFIX_TENSOR_NAME}{input_name}_uq_ptr;""" else: + input_tensor_code = input_tensor_code + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = {input_name}.impl();""" kernel_args = ["*dev_ctx"] diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml index 53514ca56691f34489a4cf9dee6c5e2a95237cc6..a36a48d505fc510e21c784ebeb4cf09ac493267c 100644 --- a/paddle/phi/api/yaml/legacy_api.yaml +++ b/paddle/phi/api/yaml/legacy_api.yaml @@ -2513,6 +2513,15 @@ output : Tensor invoke : full_like(x, 0, dtype, place) +- api: broadcast_tensors + args: (Tensor[] x) + output: Tensor[]{x.size()} + infer_meta: + func: BroadcastTensorsInferMeta + kernel: + func: broadcast_tensors + backward: broadcast_tensors_grad + # eig - api: eig args: (Tensor x) diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index bbcb6e347e516e9e164671188f3f360b0ce0b546..77fc49f29b2f3fec52d7d3e19d8bcd88cef375d4 100644 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -280,6 +280,18 @@ func : brelu_grad inplace : (out_grad -> x_grad) +- backward_api : broadcast_tensors_grad + forward : broadcast_tensors (Tensor[] x) -> Tensor[](out) + args : (Tensor[] x, Tensor[] out_grad) + output : Tensor[](x_grad) + infer_meta : + func : UnchangedMultiInferMeta + param : [x] + kernel : + func : broadcast_tensors_grad + param : [out_grad] + no_need_buffer : x + - backward_api : cast_grad forward : cast (Tensor x, DataType out_dtype) -> Tensor(out) args : (Tensor x, Tensor out_grad) diff --git 
a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py index 20e0ead8b3fa3aeec31f81833b06de43c79bbedb..63b63d3ade22375c193106608c2540cbf8f5bf79 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py @@ -99,26 +99,49 @@ class TestCPUBroadcastTensorsOp(OpTest): ] self.set_place() self.set_dtypes() + self.python_api = paddle.broadcast_tensors - def run_test(self, test_func, args): + def run_dual_test(self, test_func, args): for dtype in self.dtypes: for gen_func in self.test_gen_func_list: self.inputs, self.outputs = gen_func(dtype) - test_func(**args) + if len(self.outputs["Out"]) < 3: + self.python_out_sig = [ + f"out{i}" for i in range(len(self.outputs["Out"])) + ] + test_func(**args) + + def run_triple_in_test(self, test_func, args): + for dtype in self.dtypes: + self.inputs, self.outputs = self.test_gen_func_list[2](dtype) + self.python_out_sig = [ + f"out{i}" for i in range(len(self.outputs["Out"])) + ] + test_func(**args) def test_check_output(self): - self.run_test(self.check_output_with_place, { + self.run_dual_test(self.check_output_with_place, { "place": self.place, - "atol": 1e-1 + "atol": 1e-1, + "check_eager": True }) def test_check_grad_normal(self): - self.run_test( + self.run_dual_test( self.check_grad_with_place, { "place": self.place, "inputs_to_check": ['x0', 'x1'], "output_names": ['out0', 'out1'], "max_relative_error": 0.05, + "check_eager": True + }) + self.run_triple_in_test( + self.check_grad_with_place, { + "place": self.place, + "inputs_to_check": ['x0', 'x1', 'x2'], + "output_names": ['out0', 'out1', "out2"], + "max_relative_error": 0.05, + "check_eager": True }) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 8d7d91e2f2ec967abd9aed23beccb2206b66d53b..5b4f61ce872f53e5117e92f5aa3d8233e91dddb5 100755 --- 
a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -1132,7 +1132,9 @@ def broadcast_tensors(input, name=None): """ num_inputs = len(input) - if paddle.in_dynamic_mode(): + if paddle.framework.in_dygraph_mode(): + return _C_ops.final_state_broadcast_tensors(input) + if paddle.framework._non_static_mode(): return _C_ops.broadcast_tensors(input, num_inputs) check_type(input, 'input', (list, tuple), 'broadcast_tensors')