Unverified commit 652d12cc, authored by HongyuJia, committed by GitHub

[Tensor API] Support multiple Tensor C++ api (#50731)

* change phi tensor_gen->tensor_operants_gen

* [Tensor API] Support multiple Tensor C++ api
Parent 01e85182
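To illustrate the headline change before the diffs: composite gradient rules can now call most prim ops member-style on Tensor instead of through templated free functions. A minimal, hypothetical sketch (x and y stand for paddle::experimental::Tensor values created elsewhere; the member signatures follow the declarations this commit adds to tensor.h):

    // Before: free functions only, e.g. pow<T>(x, 2.0) or sum<T>(x, axes, dtype, false).
    // After: member calls that route through paddle::OperantsManager.
    Tensor t = x.pow(2.0);                       // Scalar exponent
    Tensor r = t.sum({0, 1}, t.dtype(), false);  // IntArray axis, DataType, keepdim
    Tensor m = r.matmul(y, false, true);         // transpose_x, transpose_y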
@@ -78,30 +78,22 @@ execute_process(
     ${static_prim_api_cc_path})
 message("copy tmp_xxx_prim_api to xxx_prim_api")
 
-set(tmp_eager_tensor_operants_cc_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.cc.tmp"
-)
-set(tmp_eager_tensor_operants_h_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.h.tmp"
-)
-set(tmp_static_tensor_operants_cc_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.cc.tmp"
-)
-set(tmp_static_tensor_operants_h_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.h.tmp"
-)
 set(eager_tensor_operants_cc_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.cc"
-)
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.cc)
 set(eager_tensor_operants_h_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
-)
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/eager/eager_tensor_operants.h)
 set(static_tensor_operants_cc_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.cc"
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.cc
 )
 set(static_tensor_operants_h_path
-    "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.h"
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/prim/utils/static/static_tensor_operants.h
 )
+set(tmp_eager_tensor_operants_cc_path ${eager_tensor_operants_cc_path}.tmp)
+set(tmp_eager_tensor_operants_h_path ${eager_tensor_operants_h_path}.tmp)
+set(tmp_static_tensor_operants_cc_path ${static_tensor_operants_cc_path}.tmp)
+set(tmp_static_tensor_operants_h_path ${static_tensor_operants_h_path}.tmp)
+set(tensor_api_yaml_path
+    ${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/tensor_api.yaml)
 
 message("Prim tensor operants code generator")
 execute_process(
@@ -114,7 +106,7 @@ execute_process(
     ${tmp_eager_tensor_operants_cc_path} --static_tensor_operants_header_path
     ${tmp_static_tensor_operants_h_path} --static_tensor_operants_source_path
     ${tmp_static_tensor_operants_cc_path} --api_prim_yaml_path
-    ${api_prim_yaml_path}
+    ${tensor_api_yaml_path}
   RESULT_VARIABLE _result)
 if(${_result})
   message(FATAL_ERROR "Prim tensor operants generate failed, exiting.")
......
@@ -25,6 +25,8 @@ eager_header_include = """// Generated by paddle/fluid/prim/api/auto_code_genera
 #include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/scalar.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/macros.h"
 """
@@ -35,6 +37,8 @@ namespace paddle {
 namespace prim {
 
 using Tensor = paddle::experimental::Tensor;
+using Scalar = paddle::experimental::Scalar;
+using IntArray = paddle::experimental::IntArray;
 using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 class EagerTensorOperants : public TensorOperantsBase {
@@ -85,6 +89,8 @@ static_header_include = """// Generated by paddle/fluid/prim/api/auto_code_gener
 #include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/scalar.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/macros.h"
 """
@@ -95,6 +101,8 @@ namespace paddle {
 namespace prim {
 
 using Tensor = paddle::experimental::Tensor;
+using Scalar = paddle::experimental::Scalar;
+using IntArray = paddle::experimental::IntArray;
 using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 class StaticTensorOperants : public TensorOperantsBase {
@@ -269,8 +277,6 @@ def generate_tensor_operants_api(
     with open(api_prim_path, 'rt') as f:
         api_prims = yaml.safe_load(f)
-    # white list temporarily
-    api_prims = ('add', 'subtract', 'multiply', 'divide')
 
     for api in apis:
         eager_api = PrimTensorAPI(api, api_prims)
......
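With the hard-coded four-op white list removed, the fluid prim generator now emits eager and static operants for every name in the YAML list, not just the arithmetic ops. A hedged sketch of the shape of the generated class (method names and signatures are inferred from the Tensor declarations added later in this diff; the real generated bodies are not part of this patch):

    // Sketch of what the generator now produces per YAML entry.
    class EagerTensorOperants : public TensorOperantsBase {
     public:
      Tensor add(const Tensor& x, const Tensor& y);
      Tensor pow(const Tensor& x, const Scalar& y);
      Tensor sum(const Tensor& x, const IntArray& axis, DataType dtype, bool keepdim);
      // ... one method per entry in tensor_api.yaml
    };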
@@ -61,7 +61,7 @@ void gather_grad(const Tensor& x,
 template <typename T>
 void tanh_grad(const Tensor& out, const Tensor& grad_out, Tensor* grad_x) {
   if (!grad_x) return;
-  auto tmp = pow<T>(out, 2.0);
+  auto tmp = out.pow(2.0);
   tmp = scale<T>(tmp, -1.0, 1.0, true);
   auto grad_x_tmp = grad_out * tmp;
   set_output<T>(grad_x_tmp, grad_x);
@@ -82,8 +82,8 @@ void subtract_grad(const Tensor& x,
       if (!reduce_dim.size()) {
         by_pass<T>(scale_out_grad, dy);
       } else {
-        auto dy_reduce_res = sum<T>(
-            scale_out_grad, phi::vectorize(reduce_dim), y.dtype(), false);
+        auto dy_reduce_res =
+            scale_out_grad.sum(phi::vectorize(reduce_dim), y.dtype(), false);
         auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
         set_output<T>(dy_tmp, dy);
       }
@@ -99,7 +99,7 @@ void subtract_grad(const Tensor& x,
         by_pass<T>(out_grad, dx);
       } else {
         auto dx_reduce_res =
-            sum<T>(out_grad, phi::vectorize(reduce_dim), x.dtype(), false);
+            out_grad.sum(phi::vectorize(reduce_dim), x.dtype(), false);
         auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
         set_output<T>(dx_tmp, dx);
       }
@@ -124,7 +124,7 @@ void add_grad(const Tensor& x,
         by_pass<T>(out_grad, dy);
       } else {
         auto dy_reduce_res =
-            sum<T>(out_grad, phi::vectorize(reduce_dim), y.dtype(), false);
+            out_grad.sum(phi::vectorize(reduce_dim), y.dtype(), false);
         auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
         set_output<T>(dy_tmp, dy);
       }
@@ -141,7 +141,7 @@ void add_grad(const Tensor& x,
         by_pass<T>(out_grad, dx);
       } else {
         auto dx_reduce_res =
-            sum<T>(out_grad, phi::vectorize(reduce_dim), x.dtype(), false);
+            out_grad.sum(phi::vectorize(reduce_dim), x.dtype(), false);
         auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
         set_output<T>(dx_tmp, dx);
       }
@@ -172,7 +172,7 @@ void sum_grad(const Tensor& x,
   }
   auto x_grad_tmp = Tensor();
   if (x_dim_size == 1) {
-    x_grad_tmp = expand<T>(out_grad, IntArray(x_dim));
+    x_grad_tmp = out_grad.expand(IntArray(x_dim));
   } else {
     if (!keepdim) {
       auto axis_ = std::vector<int64_t>();
@@ -184,9 +184,9 @@ void sum_grad(const Tensor& x,
         axis_ = axis.GetData();
       }
       auto out_grad_ = unsqueeze<T>(out_grad, axis_);
-      x_grad_tmp = expand<T>(out_grad_, IntArray(x_dim));
+      x_grad_tmp = out_grad_.expand(IntArray(x_dim));
     } else {
-      x_grad_tmp = expand<T>(out_grad, IntArray(x_dim));
+      x_grad_tmp = out_grad.expand(IntArray(x_dim));
     }
   }
@@ -203,7 +203,7 @@ void divide_grad(const Tensor& x,
                  Tensor* dy) {
   if (dy) {
     // dy = -(x/y^2) * dout
-    auto tmp0 = pow<T>(y, 2.0);
+    auto tmp0 = y.pow(2.0);
     auto tmp1 = x / tmp0;
     auto tmp2 = scale<T>(tmp1, -1.0, 0.0, true);
     auto dy_res = tmp2 * out_grad;
@@ -214,7 +214,7 @@ void divide_grad(const Tensor& x,
       set_output<T>(dy_res, dy);
     } else {
       auto dy_reduce_res =
-          sum<T>(dy_res, phi::vectorize(reduce_dim), y.dtype(), false);
+          dy_res.sum(phi::vectorize(reduce_dim), y.dtype(), false);
       auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
       set_output<T>(dy_tmp, dy);
     }
@@ -233,7 +233,7 @@ void divide_grad(const Tensor& x,
       set_output<T>(dx_res, dx);
     } else {
       auto dx_reduce_res =
-          sum<T>(dx_res, phi::vectorize(reduce_dim), x.dtype(), false);
+          dx_res.sum(phi::vectorize(reduce_dim), x.dtype(), false);
       auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
       set_output<T>(dx_tmp, dx);
     }
@@ -267,10 +267,8 @@ void multiply_grad(const Tensor& x,
     if (!axes.size()) {
       set_output<T>(x_grad_unreduce, x_grad);
     } else {
-      auto x_grad_reduced = sum<T>(x_grad_unreduce,
-                                   phi::vectorize(axes),
-                                   x_grad_unreduce.dtype(),
-                                   false);
+      auto x_grad_reduced = x_grad_unreduce.sum(
+          phi::vectorize(axes), x_grad_unreduce.dtype(), false);
       if (x_grad_reduced.dims().size() != x.dims().size()) {
         x_grad_reduced = reshape<T>(x_grad_reduced, x.shape());
       }
@@ -287,10 +285,8 @@ void multiply_grad(const Tensor& x,
     if (!axes.size()) {
       set_output<T>(y_grad_unreduce, y_grad);
     } else {
-      auto y_grad_reduced = sum<T>(y_grad_unreduce,
-                                   phi::vectorize(axes),
-                                   y_grad_unreduce.dtype(),
-                                   false);
+      auto y_grad_reduced = y_grad_unreduce.sum(
+          phi::vectorize(axes), y_grad_unreduce.dtype(), false);
       if (y_grad_reduced.dims().size() != y.dims().size()) {
         y_grad_reduced = reshape<T>(y_grad_reduced, y.shape());
       }
@@ -314,7 +310,7 @@ void expand_grad(const Tensor& x,
     if (!axes.size()) {
       by_pass<T>(out_grad, x_grad);
     } else {
-      auto reduced = sum<T>(out_grad, phi::vectorize(axes), x.dtype(), false);
+      auto reduced = out_grad.sum(phi::vectorize(axes), x.dtype(), false);
       if (reduced.dims().size() != x.dims().size()) {
         reduced = reshape<T>(reduced, x.shape());
       }
......
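Each member call above is a thin wrapper: as the generated implementation template later in this diff shows, out.pow(2.0) becomes paddle::OperantsManager::Instance().pow(static_cast<const Tensor &>(out), 2.0), and the manager forwards to an eager, static, or phi operants object (the run-mode routing is an assumption about the surrounding operants_manager code, which this patch does not show). The two forms are equivalent in a composite grad rule:

    auto tmp_old = pow<T>(out, 2.0);  // prim free function, selected by template argument T
    auto tmp_new = out.pow(2.0);      // member form; dispatched via OperantsManager at runtime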
@@ -47,6 +47,16 @@ namespace paddle {
 
 namespace experimental {
 
+class Tensor;
+
+template <typename T>
+class ScalarBase;
+using Scalar = paddle::experimental::ScalarBase<Tensor>;
+
+template <typename T>
+class IntArrayBase;
+using IntArray = paddle::experimental::IntArrayBase<Tensor>;
+
 class AbstractAutogradMeta {
  public:
   // No AbstractAutogradMeta should be created
@@ -646,12 +656,35 @@ class PADDLE_API Tensor final {
    */
   std::string name_{""};
 
+ public:
   // Tensor C++ APIs
   // Example: Tensor add(const Tensor& other) const;
   Tensor add(const Tensor& y) const;
   Tensor divide(const Tensor& y) const;
   Tensor multiply(const Tensor& y) const;
   Tensor subtract(const Tensor& y) const;
+  Tensor exp() const;
+  Tensor floor() const;
+  Tensor gather_nd(const Tensor& index) const;
+  Tensor log() const;
+  Tensor pow(const Scalar& y) const;
+  Tensor roll(const IntArray& shifts, const std::vector<int64_t>& axis) const;
+  Tensor scatter(const Tensor& index,
+                 const Tensor& updates,
+                 bool overwrite) const;
+  Tensor scatter_nd_add(const Tensor& index, const Tensor& updates) const;
+  Tensor abs() const;
+  Tensor assign() const;
+  Tensor elementwise_pow(const Tensor& y) const;
+  Tensor expand(const IntArray& shape) const;
+  Tensor matmul(const Tensor& y, bool transpose_x, bool transpose_y) const;
+  Tensor max(const IntArray& axis, bool keepdim) const;
+  Tensor maximum(const Tensor& y) const;
+  Tensor minimum(const Tensor& y) const;
+  Tensor prod(const IntArray& dims, bool keep_dim, bool reduce_all) const;
+  Tensor scale(const Scalar& scale, float bias, bool bias_after_scale) const;
+  Tensor sum(const IntArray& axis, DataType dtype, bool keepdim) const;
+  Tensor tile(const IntArray& repeat_times) const;
 };
 
 }  // namespace experimental
......
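A short, hypothetical usage sketch of the expanded surface (x, y, and index are assumed to be valid paddle::experimental::Tensor values; the calls chain because each member returns a Tensor):

    Tensor z = x.matmul(y, /*transpose_x=*/false, /*transpose_y=*/true);
    Tensor s = z.scale(/*scale=*/2.0, /*bias=*/0.0, /*bias_after_scale=*/true);
    Tensor m = s.max(/*axis=*/{0}, /*keepdim=*/false);
    Tensor g = x.gather_nd(index).abs();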
@@ -98,8 +98,10 @@ set(wrapped_infermeta_source_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/infermeta/generated.cc)
 
 # tensor and tensor operants file
+set(tensor_api_yaml_path
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/tensor_api.yaml)
 set(tensor_gen_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/tensor_gen.py)
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/tensor_operants_gen.py)
 set(operants_base_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/operants_base.h)
 set(tensor_api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/tensor_api.cc)
@@ -249,6 +251,7 @@ execute_process(
     --phi_tensor_operants_source_path ${phi_tensor_operants_source_file_tmp}
     --operants_manager_header_path ${operants_manager_header_file_tmp}
     --operants_manager_source_path ${operants_manager_source_file_tmp}
+    --tensor_api_yaml_path ${tensor_api_yaml_path}
   RESULT_VARIABLE _result)
 if(${_result})
   message(FATAL_ERROR "tensor codegen failed, exiting.")
......
@@ -30,11 +30,13 @@ inplace_optional_out_type_map = {
 indent = " "
 
-operants_base_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+operants_base_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #pragma once
 
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/scalar.h"
+#include "paddle/phi/common/int_array.h"
 """
@@ -44,6 +46,8 @@ namespace paddle {
 namespace operants {
 
 using Tensor = paddle::experimental::Tensor;
+using Scalar = paddle::experimental::Scalar;
+using IntArray = paddle::experimental::IntArray;
 
 class TensorOperantsBase {
  public:
@@ -58,7 +62,7 @@ operants_base_end = """};
 """
 
-tensor_api_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+tensor_api_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #include "paddle/phi/api/include/tensor.h"
@@ -96,12 +100,14 @@ tensor_api_source_end = """
 """
 
-operants_header_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+operants_header_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #pragma once
 
 #include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/scalar.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/macros.h"
 """
@@ -111,6 +117,9 @@ namespace paddle {
 namespace operants {
 
+using Scalar = paddle::experimental::Scalar;
+using IntArray = paddle::experimental::IntArray;
+
 class PhiTensorOperants : public TensorOperantsBase {
  private:
   DISABLE_COPY_AND_ASSIGN(PhiTensorOperants);
@@ -128,7 +137,7 @@ operants_header_end = """};
 """
 
-operants_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+operants_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #include "paddle/phi/api/include/tensor_operants.h"
@@ -151,12 +160,14 @@ operants_source_end = """
 """
 
-operants_manager_header_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+operants_manager_header_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #pragma once
 
 #include "paddle/phi/api/include/operants_base.h"
 #include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/common/scalar.h"
+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/macros.h"
 """
@@ -165,6 +176,8 @@ operants_manager_header_start = """
 namespace paddle {
 
 using Tensor = paddle::experimental::Tensor;
+using Scalar = paddle::experimental::Scalar;
+using IntArray = paddle::experimental::IntArray;
 using TensorOperantsBase = paddle::operants::TensorOperantsBase;
 
 /**
@@ -222,7 +235,7 @@ operants_manager_header_end = """};
 """
 
-operants_manager_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_gen.py
+operants_manager_source_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
 
 #include "paddle/phi/api/include/operants_manager.h"
@@ -271,11 +284,17 @@ class OperantsAPI(ForwardAPI):
 """
 
     def get_define_args_without_first_tensor(self, inplace_flag=False):
-        # NOTE(HongyuJia): consider vector<Tensor> becomes first input argument.
+        func_name = self.get_api_func_name()
         define_args = self.get_input_tensor_args(inplace_flag)
-        assert (
-            len(define_args) > 1
-        ), "Can't use tensor api without Tensor inputs"
+        assert len(define_args) >= 1, (
+            "Error! Api %s has no Tensor inputs" % func_name
+        )
+        first_input_type = " ".join(define_args[0].split(" ")[:-1])
+        # NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
+        assert first_input_type == "const Tensor&", (
+            "Error! The first argument of Tensor Api %s must be Tensor, but received %s"
+            % (func_name, first_input_type)
+        )
         for name in self.attrs['names']:
             define_args.append(self.attrs['attr_info'][name][0] + ' ' + name)
         # remove first Tensor argument
@@ -283,23 +302,26 @@ class OperantsAPI(ForwardAPI):
     def gene_tensor_api_implementation(self):
         func_name = self.get_api_func_name()
-        assert (
-            len(self.inputs['names']) > 1
-        ), "Can't use tensor api without Tensor inputs"
+        assert len(self.inputs['names']) >= 1, (
+            "Error! Api %s has no Tensor inputs" % func_name
+        )
         # remove first Tensor argument
         func_args = self.inputs['names'][1:] + self.attrs['names']
-        func_args_code = ", ".join(func_args)
+        if len(func_args) > 0:
+            func_args_code = ", ".join([""] + func_args)
+        else:
+            func_args_code = ""
         # func decalaration
         if func_name[-1] != '_':
             return f"""
 {self.get_return_type()} Tensor::{func_name}({self.get_define_args_without_first_tensor()}) const {{
-{indent}return paddle::OperantsManager::Instance().{func_name}(static_cast<const Tensor &>(*this), {func_args_code});
+{indent}return paddle::OperantsManager::Instance().{func_name}(static_cast<const Tensor &>(*this){func_args_code});
 }}
 """
         else:
             return f"""
 {self.get_return_type(inplace_flag=True)} Tensor::{func_name}({self.get_define_args_without_first_tensor(inplace_flag=True)}) const {{
-{indent}return paddle::OperantsManager::Instance().{func_name}(static_cast<const Tensor &>(*this), {func_args_code});
+{indent}return paddle::OperantsManager::Instance().{func_name}(static_cast<const Tensor &>(*this){func_args_code});
 }}
 """
@@ -394,6 +416,7 @@ def generate_tensor_operants_api(
     operants_source_path,
     operants_manager_header_path,
    operants_manager_source_path,
+    tensor_api_yaml_path,
 ):
     apis = []
@@ -423,8 +446,8 @@ def generate_tensor_operants_api(
     operants_manager_source_file.write(operants_manager_source_include)
     operants_manager_source_file.write(operants_manager_source_start)
 
-    # white list temporarily
-    api_prims = ('add', 'subtract', 'multiply', 'divide')
+    with open(tensor_api_yaml_path, 'rt') as f:
+        api_prims = yaml.safe_load(f)
 
     for api in apis:
         operants_api = OperantsAPI(api, api_prims)
@@ -506,6 +529,12 @@ def main():
         default='paddle/phi/api/lib/operants_manager.cc',
     )
 
+    parser.add_argument(
+        '--tensor_api_yaml_path',
+        help='path to tensor_api yaml file',
+        default='paddle/phi/api/yaml/tensor_api.yaml',
+    )
+
     options = parser.parse_args()
 
     api_yaml_path = options.api_yaml_path
@@ -515,6 +544,7 @@ def main():
     operants_source_path = options.phi_tensor_operants_source_path
     operants_manager_header_path = options.operants_manager_header_path
     operants_manager_source_path = options.operants_manager_source_path
+    tensor_api_yaml_path = options.tensor_api_yaml_path
 
     generate_tensor_operants_api(
         api_yaml_path,
@@ -524,6 +554,7 @@ def main():
         operants_source_path,
         operants_manager_header_path,
         operants_manager_source_path,
+        tensor_api_yaml_path,
     )
......
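The join fix in gene_tensor_api_implementation matters for APIs whose only input is the receiver. Reconstructing from the f-string template above (these are sketches of the generated output, not lines copied from a generated file):

    // Old: ", ".join(func_args) plus the hard-coded ", " in the template left a
    // dangling comma for zero-argument APIs such as exp:
    //   ...Instance().exp(static_cast<const Tensor &>(*this), );  // would not compile
    // New: ", ".join([""] + func_args) folds the leading separator into
    // func_args_code, so zero-argument APIs are emitted cleanly:
    Tensor Tensor::exp() const {
      return paddle::OperantsManager::Instance().exp(static_cast<const Tensor &>(*this));
    }
    // APIs with attributes still receive their comma-separated tail:
    Tensor Tensor::pow(const Scalar& y) const {
      return paddle::OperantsManager::Instance().pow(static_cast<const Tensor &>(*this), y);
    }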
- unsqueeze
- pow
- exp
- scale
- multiply
- matmul
- expand
- divide
- sum
- add
- abs
- assign
- elementwise_pow
- floor
- gather_nd
- log
- max
- maximum
- minimum
- prod
- roll
- scatter
- scatter_nd_add
- tile
- subtract