Unverified · Commit 7cbb433a authored by xiaoguoguo626807, committed by GitHub

[New IR] add Add_n op description and python api (#56080)

* refine program translator

* fix warning: not override

* fix bug

* merge new modifications

* modify by reviews

* resolve conflicts

* resolve conflicts

* fix

* fix

* fix conflicts

* pseudocode of backward

* modify test

* modify register op

* clear other code

* modify ci build bug

* reply review comments

* reply review comments

---------
Co-authored-by: kangguangli <kangguangli@hotmail.com>
Parent 1717247f
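For orientation, a minimal usage sketch of the Python API this commit adds, mirroring the unit test at the end of the diff (`get_ir_program()` is the helper defined in that test file, and the flag name is taken from the same test):

import paddle

paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
newir_program = get_ir_program()  # test helper that builds a new-IR program
tanh_out = newir_program.block().get_ops()[-1].result(0)
with paddle.ir.core.program_guard(newir_program):
    out = paddle.add_n([tanh_out, tanh_out])
# add_n is lowered to a builtin.combine op feeding a pd.add_n op:
assert out.get_defining_op().name() == "pd.add_n"
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})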
@@ -51,6 +51,7 @@ H_FILE_TEMPLATE = """#ifdef GET_OP_LIST
#include "paddle/fluid/ir/trait/inplace.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/fluid/ir/dialect/pd_manual_op.h"

{input}
@@ -151,6 +152,8 @@ scalar_type_maps = {
    'bool': 'ir::BoolAttribute',
}
_NO_NEED_GEN_OPS = {'add_n'}
def to_phi_and_fluid_op_name(op_item):
    # Template: - op : phi_name (fluid_name)
@@ -743,6 +746,8 @@ def OpGenerator(
    # If op has inplace info, we will generate inplace op and non-inplace op.
    for op_name in op_info.op_phi_name:
        if op_name in _NO_NEED_GEN_OPS:
            continue
        op_class_name = to_pascal_case(op_name) + "Op"
        op_dialect_name = dialect_name + "." + op_name
......
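A minimal, self-contained sketch of the generator-side skip introduced above: names in `_NO_NEED_GEN_OPS` are excluded from auto-generation so that the hand-written definitions in pd_manual_op.h/.cc are used instead. `generate_op_class_names` is a hypothetical stand-in for the relevant part of OpGenerator:

_NO_NEED_GEN_OPS = {'add_n'}

def to_pascal_case(name):
    return ''.join(part.capitalize() for part in name.split('_'))

def generate_op_class_names(op_names):
    generated = []
    for op_name in op_names:
        if op_name in _NO_NEED_GEN_OPS:
            continue  # add_n is defined manually, not generated
        generated.append(to_pascal_case(op_name) + "Op")
    return generated

print(generate_op_class_names(['mean', 'add_n', 'divide']))  # ['MeanOp', 'DivideOp']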
@@ -16,9 +16,19 @@
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/ir/core/builder.h"
#include "paddle/ir/core/builtin_op.h"

namespace paddle {
namespace dialect {
ir::OpResult add_n(std::vector<ir::OpResult> x) {
  auto combine_op =
      APIBuilder::Instance().GetBuilder()->Build<ir::CombineOp>(x);
  paddle::dialect::AddNOp add_n_op =
      APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::AddNOp>(
          combine_op.out());
  return add_n_op.out();
}
ir::OpResult mean(ir::OpResult x, std::vector<int64_t> axis, bool keepdim) {
  paddle::dialect::MeanOp mean_op =
      APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::MeanOp>(
......
@@ -22,6 +22,7 @@
namespace paddle {
namespace dialect {

ir::OpResult add_n(std::vector<ir::OpResult> x);

ir::OpResult mean(ir::OpResult x,
                  std::vector<int64_t> axis = {},
......
@@ -102,10 +102,13 @@ void PaddleDialect::initialize() {
  // NOTE(zhangbo9674): GET_OP_LIST is defined in pd_op.h which is
  // generated by op_gen.py, see details in
  // paddle/fluid/ir/dialect/CMakeLists.txt.
  // NOTE(Ruting): GET_MANUAL_OP_LIST is defined in pd_manual_op.h;
  // use RegisterOps when the list has more than two ops.
  RegisterOps<
#define GET_OP_LIST
#include "paddle/fluid/ir/dialect/pd_op.h"  // NOLINT
      >();
  RegisterOp<paddle::dialect::AddNOp>();
  RegisterInterfaces<ParameterConvertInterface>();
}
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_manual_op.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/builtin_type.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/multiary.h"
namespace paddle {
namespace dialect {
OpInfoTuple AddNOp::GetOpInfo() {
  std::vector<paddle::dialect::OpInputInfo> inputs = {
      OpInputInfo("inputs",
                  "ir::VectorType<paddle::dialect::DenseTensorType>",
                  false,
                  false,
                  false)};
  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
  std::vector<paddle::dialect::OpOutputInfo> outputs = {
      OpOutputInfo("out", "paddle::dialect::DenseTensorType", false, false)};
  paddle::dialect::OpRunTimeInfo run_time_info =
      OpRunTimeInfo("", {""}, {""}, {""}, {""}, {}, {});
  return std::make_tuple(inputs, attributes, outputs, run_time_info);
}
void AddNOp::Verify() {
  VLOG(4) << "Start Verifying inputs, outputs and attributes for: AddNOp.";
  VLOG(4) << "Verifying inputs:";
  {
    auto input_size = num_operands();
    PADDLE_ENFORCE_EQ(
        input_size,
        1u,
        phi::errors::PreconditionNotMet(
            "The size %d of inputs must be equal to 1.", input_size));
    if (auto vec_type = (*this)->operand(0).type().dyn_cast<ir::VectorType>()) {
      for (size_t i = 0; i < vec_type.size(); ++i) {
        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>(),
                       phi::errors::PreconditionNotMet(
                           "Type validation failed for the 0th input."));
      }
    } else {
      PADDLE_ENFORCE(
          (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>(),
          phi::errors::PreconditionNotMet(
              "Type validation failed for the 0th input."));
    }
  }
  VLOG(4) << "Verifying attributes:";
  {
    // The number of attributes is 0, so there is no need to check attribute
    // types.
  }
  VLOG(4) << "Verifying outputs:";
  {
    auto output_size = num_results();
    PADDLE_ENFORCE_EQ(
        output_size,
        1u,
        phi::errors::PreconditionNotMet(
            "The size %d of outputs must be equal to 1.", output_size));
    PADDLE_ENFORCE(
        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>(),
        phi::errors::PreconditionNotMet(
            "Type validation failed for the 0th output."));
  }
  VLOG(4) << "End Verifying for: AddNOp.";
}
void AddNOp::Build(ir::Builder &builder,             // NOLINT
                   ir::OperationArgument &argument,  // NOLINT
                   ir::OpResult inputs) {
  VLOG(4) << "Builder construction inputs";
  std::vector<ir::OpResult> argument_inputs = {inputs};
  argument.AddOperands(argument_inputs.begin(), argument_inputs.end());

  VLOG(4) << "Builder construction attributes";

  VLOG(4) << "Builder construction outputs";
  ir::VectorType x = inputs.type().dyn_cast<ir::VectorType>();
  std::vector<phi::DenseTensor> vec_dense_x;
  for (size_t i = 0; i < x.size(); i++) {
    vec_dense_x.push_back(phi::DenseTensor(
        std::make_unique<paddle::experimental::DefaultAllocator>(
            paddle::platform::CPUPlace())
            .get(),
        phi::DenseTensorMeta(
            TransToPhiDataType(
                x[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
            x[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
            x[i].dyn_cast<paddle::dialect::DenseTensorType>().data_layout(),
            x[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
            x[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
  }
  std::vector<phi::MetaTensor> vec_meta_x;
  for (size_t i = 0; i < vec_dense_x.size(); i++) {
    vec_meta_x.push_back(phi::MetaTensor(&vec_dense_x[i]));
  }
  std::vector<const phi::MetaTensor *> meta_x;
  for (size_t i = 0; i < vec_meta_x.size(); i++) {
    meta_x.push_back(&vec_meta_x[i]);
  }

  phi::DenseTensor dense_out;
  phi::MetaTensor meta_out(&dense_out);
  phi::AddNInferMeta(meta_x, &meta_out);

  std::vector<ir::Type> argument_outputs;
  ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
      ir::IrContext::Instance(),
      TransToIrDataType(dense_out.dtype()),
      dense_out.dims(),
      dense_out.layout(),
      dense_out.lod(),
      dense_out.offset());
  argument_outputs.push_back(out_dense_tensor_type);
  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
}
void AddNOp::InferMeta(phi::InferMetaContext *infer_meta) {
  auto fn = PD_INFER_META(phi::AddNInferMeta);
  fn(infer_meta);
}
} // namespace dialect
} // namespace paddle
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef GET_MANUAL_OP_LIST
#undef GET_MANUAL_OP_LIST
paddle::dialect::AddNOp
#else
#pragma once
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/ir/dialect/op_yaml_info_util.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/infermeta.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/builder.h"
#include "paddle/ir/core/op_base.h"
#include "paddle/ir/core/operation_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
namespace paddle {
namespace dialect {
class AddNOp : public ir::Op<AddNOp, OpYamlInfoInterface> {
 public:
  using Op::Op;
  static const char *name() { return "pd.add_n"; }
  static constexpr const char **attributes_name = nullptr;
  static constexpr uint32_t attributes_num = 0;
  static OpInfoTuple GetOpInfo();
  static void Build(ir::Builder &builder,             // NOLINT
                    ir::OperationArgument &argument,  // NOLINT
                    ir::OpResult inputs);
  void Verify();
  ir::Value inputs() { return operand_source(0); }
  ir::OpResult out() { return result(0); }
  ir::Attribute attribute(const std::string &name) {
    PADDLE_ENFORCE(
        attributes().count(name) > 0,
        phi::errors::PreconditionNotMet("Attribute does not exist."));
    return attributes().at(name);
  }
  template <typename T>
  T attribute(const std::string &name) {
    PADDLE_ENFORCE(
        attributes().count(name) > 0 && attributes().at(name).isa<T>(),
        phi::errors::PreconditionNotMet("Attribute type is incorrect."));
    return attributes().at(name).dyn_cast<T>();
  }
  static void InferMeta(phi::InferMetaContext *infer_meta);
};
} // namespace dialect
} // namespace paddle
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
#endif
@@ -1469,6 +1469,63 @@ ir::OpResult CastPyArg2OpResult(const std::string& op_type,
  }
}

std::vector<ir::OpResult> CastPyArg2VectorOfOpResult(const std::string& op_type,
                                                     PyObject* obj,
                                                     size_t arg_pos) {
  std::vector<ir::OpResult> result_list;
  if (PyList_Check(obj)) {
    Py_ssize_t len = PyList_Size(obj);
    PyObject* item = nullptr;
    for (Py_ssize_t i = 0; i < len; i++) {
      item = PyList_GetItem(obj, i);
      if (PyObject_TypeCheck(item, g_ir_opresult_pytype)) {
        result_list.emplace_back(::pybind11::handle(item).cast<ir::OpResult>());
      } else if (item == Py_None) {
        continue;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "%s(): argument (position %d) must be "
            "vector<OpResult>, but got vector<%s>",
            op_type,
            arg_pos + 1,
            reinterpret_cast<PyTypeObject*>(item->ob_type)
                ->tp_name));  // NOLINT
      }
    }
  } else if (PyTuple_Check(obj)) {
    Py_ssize_t len = PyTuple_Size(obj);
    PyObject* item = nullptr;
    for (Py_ssize_t i = 0; i < len; i++) {
      item = PyTuple_GetItem(obj, i);
      if (PyObject_TypeCheck(item, g_ir_opresult_pytype)) {
        result_list.emplace_back(::pybind11::handle(item).cast<ir::OpResult>());
      } else if (item == Py_None) {
        continue;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "%s(): argument (position %d) must be "
            "vector<OpResult>, but got vector<%s>",
            op_type,
            arg_pos + 1,
            reinterpret_cast<PyTypeObject*>(item->ob_type)
                ->tp_name));  // NOLINT
      }
    }
  } else if (PyObject_TypeCheck(obj, g_ir_opresult_pytype)) {
    return {::pybind11::handle(obj).cast<ir::OpResult>()};
  } else if (obj == Py_None) {
    return {};
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "vector<OpResult>, but got %s",
        op_type,
        arg_pos + 1,
        reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));  // NOLINT
  }
  return result_list;
}
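The converter above accepts a list or tuple of OpResults (None entries are skipped), a bare OpResult (wrapped in a one-element vector), or None (an empty vector). A hedged Python rendering of the same acceptance rules, with `OpResult` as a hypothetical stand-in for the real pybind type:

class OpResult:  # stand-in for the real ir::OpResult Python binding
    pass

def cast_to_vector_of_op_result(arg, op_type, arg_pos):
    if isinstance(arg, (list, tuple)):
        results = []
        for item in arg:
            if item is None:
                continue  # None entries are silently skipped
            if not isinstance(item, OpResult):
                raise TypeError(
                    f"{op_type}(): argument (position {arg_pos + 1}) must be "
                    f"vector<OpResult>, but got vector<{type(item).__name__}>")
            results.append(item)
        return results
    if isinstance(arg, OpResult):
        return [arg]  # a bare OpResult becomes a one-element vector
    if arg is None:
        return []  # None maps to an empty vector
    raise TypeError(
        f"{op_type}(): argument (position {arg_pos + 1}) must be "
        f"vector<OpResult>, but got {type(arg).__name__}")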
paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                              const std::string& op_type,
                                              ssize_t arg_pos) {
......
@@ -79,6 +79,9 @@ std::vector<float> CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos);
ir::OpResult CastPyArg2OpResult(const std::string& op_type,
                                PyObject* obj,
                                size_t arg_pos);
std::vector<ir::OpResult> CastPyArg2VectorOfOpResult(const std::string& op_type,
                                                     PyObject* obj,
                                                     size_t arg_pos);
std::vector<std::vector<size_t>> CastPyArg2VectorOfVectorOfSize_t(
    PyObject* obj, size_t arg_pos);
framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
......
@@ -20,6 +20,10 @@
namespace paddle {
namespace pybind {
static PyObject *add_n(PyObject *self, PyObject *args, PyObject *kwargs) {
  return static_api_add_n(self, args, kwargs);
}
static PyObject *mean(PyObject *self, PyObject *args, PyObject *kwargs) {
  return static_api_mean(self, args, kwargs);
}

@@ -36,7 +40,11 @@ static PyObject *divide(PyObject *self, PyObject *args, PyObject *kwargs) {
  return static_api_divide(self, args, kwargs);
}

static PyMethodDef OpsAPI[] = {{"add_n",
                                (PyCFunction)(void (*)(void))add_n,
                                METH_VARARGS | METH_KEYWORDS,
                                "C++ interface function for add_n."},
                               {"mean",
                                (PyCFunction)(void (*)(void))mean,
                                METH_VARARGS | METH_KEYWORDS,
                                "C++ interface function for mean."},
......
@@ -22,7 +22,24 @@
namespace paddle {
namespace pybind {
PyObject *static_api_add_n(PyObject *self, PyObject *args, PyObject *kwargs) {
  try {
    VLOG(6) << "Add add_n op into program";
    VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);

    // Get OpResult from args
    PyObject *x_obj = PyTuple_GET_ITEM(args, 0);
    auto x = CastPyArg2VectorOfOpResult("add_n", x_obj, 0);

    // Parse Attributes if needed

    // Call ir static api
    auto out = paddle::dialect::add_n(x);

    return ToPyObject(out);
  } catch (...) {
    ThrowExceptionToPython(std::current_exception());
    return nullptr;
  }
}
PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs) {
  try {
    VLOG(6) << "Add mean op into program";
......
@@ -24,6 +24,7 @@
namespace paddle {
namespace pybind {

PyObject *static_api_add_n(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_sum(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_divide(PyObject *self, PyObject *args, PyObject *kwargs);
......
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
@@ -1888,6 +1888,9 @@ def add_n(inputs, name=None):
            inputs = [inputs]
        return _C_ops.add_n(inputs)
    else:
        if paddle.ir.core._use_new_ir_api():
            return paddle._ir_ops.add_n(inputs)
        helper = LayerHelper('add_n', **locals())
        check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
        if isinstance(inputs, (list, tuple)):
......
@@ -37,7 +37,7 @@ def get_ir_program():

class TestBuildOp(unittest.TestCase):
    def test_build_mean_op(self):
        newir_program = get_ir_program()
        tanh_out = newir_program.block().get_ops()[-1].result(0)
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
@@ -55,6 +55,30 @@ class TestBuildOp(unittest.TestCase):
        )
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})

class TestBuildOp2(unittest.TestCase):
    def test_build_add_n_op(self):
        newir_program = get_ir_program()
        tanh_out = newir_program.block().get_ops()[-1].result(0)
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
        with paddle.ir.core.program_guard(newir_program):
            out1 = paddle.mean(tanh_out)
            out2 = paddle.mean(tanh_out)
            out = paddle.add_n([out1, out2])
        print(newir_program)
        self.assertEqual(out.get_defining_op().name(), "pd.add_n")
        self.assertEqual(
            out.get_defining_op()
            .operands()[0]
            .source()
            .get_defining_op()
            .name(),
            "builtin.combine",
        )
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})

class TestBuildOp3(unittest.TestCase):
    def test_insertion_point(self):
        newir_program = get_ir_program()
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
......