Unverified commit d24a402e, authored by zyfncg, committed by GitHub

[cherry-pick] Adjust the Phi C++ API and yaml (#41576, #41778, #41909) (#41928)

* [PHI] Support some c++ api in paddle namespace (#41778)

* support some c++ api in paddle namespace

* change c++ api namespace in custom op

* [Phi] Support setting size of vector<Tensor> for out in yaml (#41576)

* support setting vector out size in yaml

* support setting size of vector&lt;Tensor&gt; for out in yaml

* add data transform config for shape and size (#41909)

* fix api_gen bug
Parent: ec1d2a16
@@ -24,6 +24,114 @@ limitations under the License. */
namespace paddle {
using Tensor = experimental::Tensor;
// using several Tensor initialize functions in paddle namespace
using experimental::abs;
using experimental::acos;
using experimental::acosh;
using experimental::add;
using experimental::allclose;
using experimental::argsort;
using experimental::asin;
using experimental::asinh;
using experimental::atan;
using experimental::atan2;
using experimental::atanh;
using experimental::bernoulli;
using experimental::ceil;
using experimental::cholesky;
using experimental::cholesky_solve;
using experimental::clip;
using experimental::concat;
using experimental::conj;
using experimental::cos;
using experimental::cosh;
using experimental::cross;
using experimental::det;
using experimental::diag;
using experimental::diagonal;
using experimental::digamma;
using experimental::dist;
using experimental::divide;
using experimental::dot;
using experimental::elu;
using experimental::empty;
using experimental::empty_like;
using experimental::equal_all;
using experimental::erf;
using experimental::erfinv;
using experimental::exp;
using experimental::expand;
using experimental::expm1;
using experimental::flatten;
using experimental::flip;
using experimental::floor;
using experimental::floor_divide;
using experimental::full;
using experimental::gather;
using experimental::gather_nd;
using experimental::gelu;
using experimental::gumbel_softmax;
using experimental::imag;
using experimental::increment;
using experimental::index_sample;
using experimental::is_empty;
using experimental::isclose;
using experimental::isfinite;
using experimental::isinf;
using experimental::isnan;
using experimental::kron;
using experimental::kthvalue;
using experimental::label_smooth;
using experimental::lerp;
using experimental::lgamma;
using experimental::log;
using experimental::log10;
using experimental::log1p;
using experimental::log2;
using experimental::logit;
using experimental::masked_select;
using experimental::matmul;
using experimental::matrix_power;
using experimental::maximum;
using experimental::maxout;
using experimental::minimum;
using experimental::mode;
using experimental::multi_dot;
using experimental::multinomial;
using experimental::multiply;
using experimental::mv;
using experimental::nll_loss;
using experimental::one_hot;
using experimental::pixel_shuffle;
using experimental::poisson;
using experimental::qr;
using experimental::real;
using experimental::reciprocal;
using experimental::relu;
using experimental::reshape;
using experimental::roll;
using experimental::round;
using experimental::rsqrt;
using experimental::scatter;
using experimental::scatter_nd_add;
using experimental::selu;
using experimental::sign;
using experimental::silu;
using experimental::sin;
using experimental::sinh;
using experimental::split;
using experimental::sqrt;
using experimental::square;
using experimental::stack;
using experimental::strided_slice;
using experimental::subtract;
using experimental::tanh;
using experimental::thresholded_relu;
using experimental::tile;
using experimental::trace;
using experimental::triangular_solve;
using experimental::unbind;
using experimental::unique;
using experimental::unsqueeze;
using experimental::where;
} // namespace paddle
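With these using-declarations in place, downstream C++ code such as custom operators can call the functional API without the `experimental::` qualifier. The snippet below is a minimal usage sketch; the `paddle/extension.h` include and the `MyFusedForward` helper are illustrative assumptions, not part of this diff.

```cpp
// Minimal sketch: call the Phi C++ functional API from the paddle namespace.
// The header and the function name are assumptions for illustration only.
#include "paddle/extension.h"

paddle::Tensor MyFusedForward(const paddle::Tensor& x, const paddle::Tensor& w) {
  // Previously these calls required the paddle::experimental:: qualifier.
  auto y = paddle::matmul(x, w);
  return paddle::relu(y);
}
```

The custom-op change in the next hunk applies exactly this simplification to the existing `PhiLinearForward` example.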
@@ -20,8 +20,7 @@ limitations under the License. */
std::vector<paddle::Tensor> PhiLinearForward(const paddle::Tensor& x,
const paddle::Tensor& weight,
const paddle::Tensor& bias) {
-  return {
-      paddle::experimental::add(paddle::experimental::matmul(x, weight), bias)};
+  return {paddle::add(paddle::matmul(x, weight), bias)};
}
std::vector<std::vector<int64_t>> LinearInferShape(
@@ -1787,6 +1787,8 @@
    func : ShapeInferMeta
  kernel :
    func : shape, shape_sr
+  data_transform:
+    skip_transform : input

# shard_index
- api : shard_index
@@ -1863,6 +1865,8 @@
    func : SizeInferMeta
  kernel :
    func : size
+  data_transform:
+    skip_transform : x

- api : slice
  args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
@@ -2146,7 +2150,7 @@
    data_type : x

- api : unsqueeze
-  args : (Tensor x, IntArray axes)
+  args : (Tensor x, IntArray axis)
  output : Tensor(xshape), Tensor(out)
  infer_meta :
    func : UnsqueezeInferMeta
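The `data_transform : skip_transform` entries above only affect what the API code generator emits. As a rough sketch (an assumption about the generated output, not code copied from this commit), the flag surfaces as the third argument of `PrepareData` in the generated C++ API, which is exactly the template edited in the Python hunks below:

```cpp
// Illustrative fragment of the generated C++ API for an op configured with
// `skip_transform : input`; the variable names are hypothetical.
// A "{true}" transform flag tells PrepareData not to transform this input
// (no dtype/layout/place conversion) before launching the kernel.
auto dense_input = PrepareData(input, kernel.InputAt(0), {true});
// Without the config, the default flag is emitted instead:
// auto dense_x = PrepareData(x, kernel.InputAt(0), {});
```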
@@ -555,9 +555,11 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
        kernel_param = input_names + attr_names
        input_tensor_code = ""
+        kernel_idx = -1
        for i, input_name in enumerate(input_names):
            # set input code
            if input_name in kernel_param:
+                kernel_idx = kernel_idx + 1
                trans_flag = "{}"
                if input_name in self.data_transform['skip_transform']:
                    trans_flag = "{true}"
@@ -566,7 +568,7 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                if input_name in self.optional_vars:
                    input_tensor_code = input_tensor_code + f"""
{code_indent} {input_trans_map[input_infos[input_name]]} {PREFIX_TENSOR_NAME}{input_name}(paddle::none);
-{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_ptr = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
+{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_ptr = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});
{code_indent} if ({PREFIX_TENSOR_NAME}{input_name}_ptr) {{
{code_indent} {PREFIX_TENSOR_NAME}{input_name} = paddle::make_optional<const phi::DenseTensor&>(*{PREFIX_TENSOR_NAME}{input_name}_ptr);
{code_indent} }}"""
@@ -574,12 +576,12 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                else:
                    if self.inputs['input_info'][input_name] == "const Tensor&":
                        input_tensor_code = input_tensor_code + f"""
-{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
+{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});"""
                    elif self.inputs['input_info'][
                            input_name] == "const std::vector<Tensor>&":
                        input_tensor_code = input_tensor_code + f"""
-{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
+{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_idx}), {trans_flag});
{code_indent} std::vector<const phi::DenseTensor*> {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size());
{code_indent} for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{
{code_indent} {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
@@ -588,7 +590,8 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                    else:
                        # do nothing
                        pass
-            else:
+            elif self.infer_meta[
+                    'param'] is None or input_name in self.infer_meta['param']:
                if input_name in self.optional_vars:
                    input_tensor_code = input_tensor_code + f"""
{code_indent} {input_trans_map[input_infos[input_name]]} {PREFIX_TENSOR_NAME}{input_name}(paddle::none);
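Switching from the loop index `i` to the separately tracked `kernel_idx` matters whenever an API input is consumed by infer_meta but not by the kernel itself. A hedged sketch of the generated code, with hypothetical input names, illustrates the fix:

```cpp
// Suppose an api declares inputs (x, y, z) but its kernel only consumes (x, z);
// y is used by infer_meta only. The kernel then has two inputs, which must be
// indexed 0 and 1 in the generated code:
auto dense_x = PrepareData(x, kernel.InputAt(0), {});  // loop index i == 0, kernel_idx == 0
auto dense_z = PrepareData(z, kernel.InputAt(1), {});  // loop index i == 2, kernel_idx == 1
// Using i here would have requested kernel.InputAt(2), which does not exist
// for a two-input kernel.
```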