Unverified commit 2931d585, authored by YuanRisheng, committed by GitHub

[New IR] Support building networks using the new IR in Python (#55761)

* support constructing networks using the IR in Python

* fix py3 bugs

* change source return type to OpResult
Parent: 92fa8f60
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_api.h"
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/ir/core/builder.h"
namespace paddle {
namespace dialect {
ir::OpResult mean(ir::OpResult x, std::vector<int64_t> axis, bool keepdim) {
  paddle::dialect::MeanOp mean_op =
      APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::MeanOp>(
          x, axis, keepdim);
  return mean_op.result(0);
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle/ir/core/value.h"
namespace paddle {
namespace dialect {
ir::OpResult mean(ir::OpResult x,
                  std::vector<int64_t> axis = {},
                  bool keepdim = false);
} // namespace dialect
} // namespace paddle
@@ -16,7 +16,9 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/ir/core/dialect.h"
+#include "paddle/ir/core/enforce.h"
 #include "paddle/ir/core/parameter.h"
+#include "paddle/ir/core/program.h"

 namespace paddle {
 namespace dialect {
@@ -46,6 +48,53 @@ class PaddleDialect : public ir::Dialect {
   void initialize();
 };

+///
+/// \brief APIBuilder is used in IR API for building ops
+///
+class APIBuilder {
+ public:
+  static APIBuilder& Instance() {
+    static APIBuilder api_builder;
+    return api_builder;
+  }
+
+  void SetProgram(ir::Program* program) {
+    builder_ = std::make_shared<ir::Builder>(ctx_, program->block());
+  }
+
+  /// Set the insertion point to the specified operation, which will cause
+  /// subsequent insertions to go right before it.
+  void SetInsertionPoint(ir::Operation* op) {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPoint(op);
+  }
+
+  void ResetInsertionPointToStart() {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPointToStart(builder_->block());
+  }
+
+  void ResetInsertionPointToEnd() {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPointToEnd(builder_->block());
+  }
+
+  std::shared_ptr<ir::Builder> GetBuilder() { return builder_; }
+
+ private:
+  APIBuilder() : builder_(nullptr) {
+    ctx_ = ir::IrContext::Instance();
+    ctx_->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
+  }
+
+  ir::IrContext* ctx_;
+  std::shared_ptr<ir::Builder> builder_;
+};
+
 }  // namespace dialect
 }  // namespace paddle
...
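A minimal sketch of how these insertion-point controls surface in Python once the bindings further down are in place. The names `newir_program`, `tanh_op`, and `add_out` are assumptions for illustration, taken to come from an already-translated program as in the unit test at the end of this commit:

    import paddle
    from paddle import ir

    # Assumes FLAGS_enable_new_ir_api is on, so paddle.mean builds new IR ops.
    ir.set_global_program(newir_program)  # wraps APIBuilder::SetProgram
    ir.set_insertion_point(tanh_op)       # subsequent ops insert right before tanh_op
    out = paddle.mean(add_out)            # this pd.mean lands before tanh_op
    ir.reset_insertion_point_to_end()     # resume appending at the end of the block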
@@ -124,6 +124,7 @@ set(PYBIND_SRCS
     pybind.cc
     imperative.cc
     inference_api.cc
+    static_op_function.cc
     ir.cc
     graph.cc
     bind_fleet_executor.cc
...
@@ -10,8 +10,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/pybind/eager_utils.h"

 #include <Python.h>
+#include "paddle/ir/core/value.h"

 // Avoid a problem with copysign defined in pyconfig.h on Windows.
 #ifdef copysign
 #undef copysign
@@ -49,6 +49,7 @@ extern PyTypeObject* p_tensor_type;
 extern PyTypeObject* p_string_tensor_type;

 extern PyTypeObject* g_framework_scope_pytype;
+extern PyTypeObject* g_ir_opresult_pytype;
 extern PyTypeObject* g_vartype_pytype;
 extern PyTypeObject* g_place_pytype;
 extern PyTypeObject* g_cudaplace_pytype;
@@ -858,6 +859,12 @@ PyObject* ToPyObject(const phi::DenseTensor* value) {
   return obj.ptr();
 }

+PyObject* ToPyObject(const ir::OpResult& value) {
+  auto obj = ::pybind11::cast(value);
+  obj.inc_ref();
+  return obj.ptr();
+}
+
 #ifdef PADDLE_WITH_DISTRIBUTE
 PyObject* ToPyObject(const phi::distributed::DistTensor* value) {
   auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
@@ -1428,6 +1435,21 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
   }
 }

+ir::OpResult CastPyArg2OpResult(const std::string& op_type,
+                                PyObject* obj,
+                                size_t arg_pos) {
+  if (PyObject_TypeCheck(obj, g_ir_opresult_pytype)) {
+    return ::pybind11::handle(obj).cast<ir::OpResult>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "OpResult, but got %s",
+        op_type,
+        arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+}
+
 paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
...
@@ -29,6 +29,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/ir/core/value.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/int_array.h"
@@ -75,6 +76,9 @@ std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
 std::vector<int64_t> CastPyArg2VectorOfInt64(PyObject* obj, size_t arg_pos);
 std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos);
 std::vector<float> CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos);
+ir::OpResult CastPyArg2OpResult(const std::string& op_type,
+                                PyObject* obj,
+                                size_t arg_pos);
 std::vector<std::vector<size_t>> CastPyArg2VectorOfVectorOfSize_t(
     PyObject* obj, size_t arg_pos);
 framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
@@ -128,6 +132,8 @@ PyObject* ToPyObject(const paddle::framework::Vocab& value);
 PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);

+PyObject* ToPyObject(const ir::OpResult& value);
+
 class PyTensorHook : public egr::TensorHook {
  public:
   explicit PyTensorHook(PyObject* func) : py_func_(func) {
...
@@ -14,6 +14,7 @@
 #include "paddle/fluid/pybind/ir.h"

+#include <Python.h>
 #include <algorithm>
 #include <memory>
 #include <string>
@@ -21,8 +22,10 @@
 #include <unordered_set>
 #include <utility>

+#include "paddle/fluid/ir/dialect/pd_dialect.h"
 #include "paddle/fluid/ir/dialect/pd_type.h"
 #include "paddle/fluid/ir/interface/op_yaml_info.h"
+#include "paddle/fluid/ir_adaptor/translator/translate.h"
 #include "paddle/ir/core/block.h"
 #include "paddle/ir/core/builtin_attribute.h"
 #include "paddle/ir/core/program.h"
@@ -39,26 +42,36 @@ using ir::OpResult;
 using ir::Program;
 using ir::Type;
 using ir::Value;
+using paddle::dialect::APIBuilder;
 using paddle::dialect::DenseTensorType;
 using pybind11::return_value_policy;

 namespace paddle {
 namespace pybind {

+PyTypeObject *g_ir_opresult_pytype = nullptr;
+
+void BindOpsAPI(pybind11::module *module);
+
 void BindProgram(py::module *m) {
   py::class_<Program> program(*m, "Program");
-  program.def("parameters_num", &Program::parameters_num)
+  program
+      .def(
+          "__init__",
+          [](Program &self) { new (&self) Program(ir::IrContext::Instance()); })
+      .def("__str__",
+           [](Program &self) {
+             std::ostringstream print_stream;
+             self.Print(print_stream);
+             return print_stream.str();
+           })
+      .def("parameters_num", &Program::parameters_num)
       .def("block",
            py::overload_cast<>(&Program::block),
            return_value_policy::reference)
       .def("block",
            py::overload_cast<>(&Program::block, py::const_),
-           return_value_policy::reference)
-      .def("print", [](Program &self) {
-        std::ostringstream print_stream;
-        self.Print(print_stream);
-        LOG(INFO) << print_stream.str();
-      });
+           return_value_policy::reference);
 }

 void BindBlock(py::module *m) {
@@ -106,7 +119,7 @@ void BindOperation(py::module *m) {
             paddle::dialect::OpYamlInfoInterface yaml_interface =
                 self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
             auto inputs_info = std::get<0>(yaml_interface.GetOpInfo());
-            for (auto input_info : inputs_info) {
+            for (auto &input_info : inputs_info) {
               op_list.append(input_info.name);
             }
             return op_list;
@@ -117,7 +130,7 @@ void BindOperation(py::module *m) {
             paddle::dialect::OpYamlInfoInterface yaml_interface =
                 self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
             auto attrs_info = std::get<1>(yaml_interface.GetOpInfo());
-            for (auto attr_info : attrs_info) {
+            for (auto &attr_info : attrs_info) {
               op_list.append(attr_info.name);
             }
             return op_list;
@@ -128,7 +141,7 @@ void BindOperation(py::module *m) {
             paddle::dialect::OpYamlInfoInterface yaml_interface =
                 self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
             auto outputs_info = std::get<2>(yaml_interface.GetOpInfo());
-            for (auto output_info : outputs_info) {
+            for (auto &output_info : outputs_info) {
               op_list.append(output_info.name);
             }
             return op_list;
@@ -147,12 +160,17 @@ void BindValue(py::module *m) {

 void BindOpOperand(py::module *m) {
   py::class_<OpOperand> op_operand(*m, "OpOperand");
-  op_operand.def("source", &OpOperand::source)
-      .def("set_source", &OpOperand::set_source);
+  op_operand
+      .def("source",
+           [](OpOperand &self) { return self.source().dyn_cast<OpResult>(); })
+      .def("set_source", [](OpOperand &self, const OpResult &result) {
+        self.set_source(result);
+      });
 }

 void BindOpResult(py::module *m) {
   py::class_<OpResult> op_result(*m, "OpResult");
+  g_ir_opresult_pytype = reinterpret_cast<PyTypeObject *>(op_result.ptr());
   op_result
       .def("get_defining_op",
            &OpResult::GetDefiningOp,
@@ -197,7 +215,11 @@ void BindOpResult(py::module *m) {
 void BindType(py::module *m) {
   py::class_<Type> ir_type(*m, "Type");
   ir_type.def("__eq__", [](Type &self, Type &other) { return self == other; })
-      .def("print", [](Type &self) { LOG(INFO) << self; });
+      .def("__str__", [](Type &self) {
+        std::ostringstream print_stream;
+        print_stream << self;
+        return print_stream.str();
+      });
 }
@@ -220,17 +242,29 @@ void BindUtils(pybind11::module *m) {
                            "DenseTensorType"));
          }
        });
+  m->def("set_global_program",
+         [](Program *program) { APIBuilder::Instance().SetProgram(program); });
+  m->def("set_insertion_point",
+         [](Operation *op) { APIBuilder::Instance().SetInsertionPoint(op); });
+  m->def("reset_insertion_point_to_start",
+         []() { APIBuilder::Instance().ResetInsertionPointToStart(); });
+  m->def("reset_insertion_point_to_end",
+         []() { APIBuilder::Instance().ResetInsertionPointToEnd(); });
+  m->def("translate_to_new_ir", &paddle::TranslateLegacyProgramToProgram);
 }

-void BindNewIR(pybind11::module *m) {
-  BindProgram(m);
-  BindBlock(m);
-  BindOperation(m);
-  BindValue(m);
-  BindOpOperand(m);
-  BindOpResult(m);
-  BindType(m);
-  BindUtils(m);
+void BindNewIR(pybind11::module *module) {
+  auto ir_module = module->def_submodule("ir");
+  BindProgram(&ir_module);
+  BindBlock(&ir_module);
+  BindOperation(&ir_module);
+  BindValue(&ir_module);
+  BindOpOperand(&ir_module);
+  BindOpResult(&ir_module);
+  BindType(&ir_module);
+  BindUtils(&ir_module);
+
+  auto ops_modules = ir_module.def_submodule("ops");
+  BindOpsAPI(&ops_modules);
 }

 }  // namespace pybind
...
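Sketched below is the resulting Python surface, assuming a built wheel: bindings that previously sat at the top level now live in a dedicated `ir` submodule, and the generated op entry points sit one level deeper under `ir.ops`.

    from paddle.fluid import core

    program = core.ir.Program()          # the new __init__ binding above
    print(program)                       # __str__ replaces the old .print() method
    core.ir.set_global_program(program)  # point APIBuilder at this program
    # generated static ops are reachable as core.ir.ops.mean(...)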
@@ -195,7 +195,6 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/nan_inf_utils.h"
 #include "paddle/fluid/imperative/layout_autotune.h"
-#include "paddle/fluid/ir_adaptor/translator/translate.h"
 #include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
 #include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
 #include "paddle/fluid/pybind/eager_utils.h"
@@ -2748,7 +2747,6 @@ All parameter, weight, gradient are variables in Paddle.
   // Add skipped op list
   m.def("set_skipped_op_list",
         [](const std::string &op_list) { egr::SetSkipOpList(op_list); });
-  m.def("translate_to_new_ir", &paddle::TranslateLegacyProgramToProgram);
   BindFleetWrapper(&m);
   BindIO(&m);
   BindParallelExecutor(m);
...
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <Python.h>
#include "paddle/fluid/ir/dialect/pd_api.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/op_function_common.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace pybind {
PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs) {
  try {
    VLOG(6) << "Add mean op into program";
    VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);

    // Get OpResult from args
    PyObject *x_obj = PyTuple_GET_ITEM(args, 0);
    auto x = CastPyArg2OpResult("mean", x_obj, 0);

    // Parse Attributes if needed
    PyObject *axis_obj = PyTuple_GET_ITEM(args, 1);
    paddle::experimental::IntArray axis =
        CastPyArg2IntArray(axis_obj, "mean", 1);
    PyObject *keepdim_obj = PyTuple_GET_ITEM(args, 2);
    bool keepdim = CastPyArg2Boolean(keepdim_obj, "mean", 2);

    // Call ir static api
    auto out = paddle::dialect::mean(x, axis.GetData(), keepdim);
    return ToPyObject(out);
  } catch (...) {
    ThrowExceptionToPython(std::current_exception());
    return nullptr;
  }
}

static PyObject *mean(PyObject *self, PyObject *args, PyObject *kwargs) {
  return static_api_mean(self, args, kwargs);
}

static PyMethodDef OpsAPI[] = {{"mean",
                                (PyCFunction)(void (*)(void))mean,
                                METH_VARARGS | METH_KEYWORDS,
                                "C++ interface function for mean."},
                               {nullptr, nullptr, 0, nullptr}};

void BindOpsAPI(pybind11::module *module) {
  if (PyModule_AddFunctions(module->ptr(), OpsAPI) < 0) {
    PADDLE_THROW(phi::errors::Fatal("Add C++ api to core.ops failed!"));
  }
}

}  // namespace pybind
}  // namespace paddle
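Because `static_api_mean` reads its arguments positionally out of the args tuple, a direct call has to supply all three values; a hedged sketch, where `x` is assumed to be an `ir.OpResult` inside the program registered via `set_global_program`:

    from paddle.fluid import core

    # x: an ir.OpResult; axis and keepdim have no defaults at this layer.
    out = core.ir.ops.mean(x, [], False)
    print(out.get_defining_op().name())  # "pd.mean"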
@@ -1268,3 +1268,15 @@ PHI_DEFINE_EXPORTED_string(tensor_operants_mode,
 PHI_DEFINE_EXPORTED_bool(enable_new_ir_in_executor,
                          false,
                          "Enable new IR in executor");
+
+/**
+ * Using new IR API in Python
+ * Name: enable_new_ir_api
+ * Since Version: 2.6.0
+ * Value Range: bool, default=false
+ * Example:
+ * Note: If True, the new IR API will be used in Python
+ */
+PHI_DEFINE_EXPORTED_bool(enable_new_ir_api,
+                         false,
+                         "Enable new IR API in Python");
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core

__all__ = []

for name in dir(core.ir.ops):
    globals()[name] = getattr(core.ir.ops, name)
    __all__.append(name)
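This loop re-exports every function generated under `core.ir.ops` as `paddle._ir_ops.*`, which is how `paddle/tensor/stat.py` below reaches them; a sketch, again assuming `x` is an `ir.OpResult`:

    import paddle

    # equivalent to calling core.ir.ops.mean(x, [], False) directly
    out = paddle._ir_ops.mean(x, [], False)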
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.fluid.core import (
+from paddle.fluid.libpaddle.ir import (
     Program,
     Block,
     Operation,
@@ -21,12 +21,18 @@ from paddle.fluid.core import (
     OpResult,
     Type,
 )  # noqa: F401
-from paddle.fluid.core import (
+from paddle.fluid.libpaddle.ir import (
     get_op_result_shape,
     get_op_result_dtype,
     translate_to_new_ir,
+    set_global_program,
+    set_insertion_point,
+    reset_insertion_point_to_start,
+    reset_insertion_point_to_end,
 )  # noqa: F401

+from . import core
+
 __all__ = [  # noqa
     'Program',
     'Block',
...
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid.libpaddle.ir import Program, set_global_program

from ..fluid.wrapped_decorator import signature_safe_contextmanager


def _use_new_ir_api():
    """
    This API checks whether paddle uses the new IR API.

    Returns:
        bool: Whether paddle uses the new IR API.
    """
    if paddle.framework.get_flags("FLAGS_enable_new_ir_api")[
        'FLAGS_enable_new_ir_api'
    ]:
        return True
    else:
        return False


# _main_program_ is a global Program instance.
_main_program_ = Program()
# Set the global program for the C++ side; it is used to build ops in C++.
set_global_program(_main_program_)

_startup_program_ = Program()
def default_startup_program():
    """
    Get the default/global startup program.

    The :code:`paddle.nn` functions append initialization operators to the
    startup program, and the :code:`startup_program` initializes the
    parameters by running those OPs.

    This method returns the default or the current startup program. Users can use
    :ref:`api_paddle_ir_core_program_guard` to switch :ref:`api_paddle_ir_Program`.

    Returns:
        Program: current default startup program.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()
            x = paddle.static.data(name="x", shape=[-1, 784], dtype='float32')
            out = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")
            print("main program is: {}".format(paddle.static.default_main_program()))
            print("start up program is: {}".format(paddle.static.default_startup_program()))
    """
    return _startup_program_
def default_main_program():
    """
    This API can be used to get the ``default main program``, which stores the
    descriptions of OPs and tensors.

    For example, ``z = paddle.add(x, y)`` creates a new ``add`` OP and a new
    ``z`` tensor, and they are recorded in the ``default main program``.

    The ``default main program`` is the default value of the ``Program`` parameter in
    a lot of APIs. For example, :code:`Executor.run()` executes the
    :code:`default_main_program` when no program is specified.

    If you want to switch the ``default main program``, you can use :ref:`api_paddle_ir_core_program_guard`.

    Returns:
        Program: A ``Program`` which holds the descriptions of OPs and tensors in the network.

    Examples:
        .. code-block:: python

            import paddle

            paddle.enable_static()
            # Sample Network:
            x = paddle.static.data(name='x', shape=[100, 100], dtype='float32')
            y = paddle.static.data(name='y', shape=[100, 100], dtype='float32')
            out = paddle.add(x, y)

            # print the number of blocks in the program, 1 in this case
            print(paddle.static.default_main_program().num_blocks)  # 1
            # print the default_main_program
            print(paddle.static.default_main_program())
    """
    return _main_program_
def switch_main_program(program):
    """
    Switch the main program to a new program.

    Args:
        program(Program): The new main program

    Returns:
        Program: The previous main program
    """
    global _main_program_
    prev_program = _main_program_
    _main_program_ = program
    set_global_program(_main_program_)
    return prev_program
def switch_startup_program(program):
    """
    Switch the startup program to a new program.

    Args:
        program(Program): The new startup program

    Returns:
        Program: The previous startup program
    """
    global _startup_program_
    prev_program = _startup_program_
    _startup_program_ = program
    return prev_program
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
    """
    :api_attr: Static Graph

    Change the global main program and startup program with the ``with`` statement.
    Layer functions in the Python ``with`` block will append operators and
    Tensors to the new main programs.

    Args:
        main_program(Program): New main program inside ``with`` statement.
        startup_program(Program, optional): New startup program inside ``with``
            statement. :code:`None` means not changing the startup program,
            and default_startup_program is still used.
            Default: None.

    Examples:
        .. code-block:: python
            :name: code-example-1

            import paddle

            paddle.enable_static()
            main_program = paddle.static.Program()
            startup_program = paddle.static.Program()
            with paddle.static.program_guard(main_program, startup_program):
                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')

    Notes: The temporary :code:`Program` can be used if the user does not need
    to construct either the startup program or the main program.

    Examples:
        .. code-block:: python
            :name: code-example-2

            import paddle

            paddle.enable_static()
            main_program = paddle.static.Program()
            # does not care about startup program. Just pass a temporary value.
            with paddle.static.program_guard(main_program, paddle.static.Program()):
                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
    """
    from ..fluid.data_feeder import check_type

    check_type(
        main_program, 'main_program', Program, 'paddle.static.program_guard'
    )
    main_program = switch_main_program(main_program)
    if startup_program is not None:
        check_type(
            startup_program,
            'startup_program',
            Program,
            'paddle.static.program_guard',
        )
        startup_program = switch_startup_program(startup_program)
    try:
        yield
    finally:
        switch_main_program(main_program)
        if startup_program is not None:
            switch_startup_program(startup_program)
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import os
+
 from . import amp  # noqa: F401
 from . import nn  # noqa: F401
@@ -55,9 +57,8 @@ from ..fluid.compiler import ExecutionStrategy  # noqa: F401
 from ..fluid.framework import default_main_program  # noqa: F401
 from ..fluid.framework import default_startup_program  # noqa: F401
 from ..fluid.framework import device_guard  # noqa: F401
-from ..fluid.framework import Program  # noqa: F401
 from ..fluid.framework import name_scope  # noqa: F401
-from ..fluid.framework import program_guard  # noqa: F401
 from ..fluid.framework import cpu_places  # noqa: F401
 from ..fluid.framework import cuda_places  # noqa: F401
 from ..fluid.framework import xpu_places  # noqa: F401
@@ -79,6 +80,15 @@ from .nn.metric import auc  # noqa: F401
 from .nn.metric import accuracy  # noqa: F401
 from .nn.metric import ctr_metric_bundle  # noqa: F401

+import paddle
+
+if paddle.ir.core._use_new_ir_api():
+    from ..ir import Program  # noqa: F401
+    from ..ir import program_guard  # noqa: F401
+else:
+    from ..fluid.framework import program_guard  # noqa: F401
+    from ..fluid.framework import Program  # noqa: F401
+
 __all__ = [  # noqa
     'append_backward',
     'gradients',
...
@@ -15,7 +15,7 @@
 # TODO: define statistical functions of a tensor

 import paddle
-from paddle import _C_ops
+from paddle import _C_ops, _ir_ops, ir
 from paddle.framework import in_dynamic_mode

 from ..common_ops_import import Variable
@@ -82,6 +82,8 @@ def mean(x, axis=None, keepdim=False, name=None):
     if in_dynamic_mode():
         return _C_ops.mean(x, axis, keepdim)
     else:
+        if ir.core._use_new_ir_api():
+            return _ir_ops.mean(x, axis, keepdim)
         reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
         check_variable_and_dtype(
             x,
...
@@ -498,6 +498,7 @@ packages=['paddle',
           'paddle.geometric',
           'paddle.geometric.message_passing',
           'paddle.geometric.sampling',
+          'paddle.ir',
 ]

 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
...
@@ -1496,6 +1496,7 @@ def get_setup_parameters():
         'paddle.geometric',
         'paddle.geometric.message_passing',
         'paddle.geometric.sampling',
+        'paddle.ir',
     ]
     paddle_bins = ''
...
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import paddle
from paddle import ir

paddle.enable_static()


def get_ir_program():
    x = paddle.randn([4, 4])
    main_program, start_program = (
        paddle.static.Program(),
        paddle.static.Program(),
    )
    with paddle.static.program_guard(main_program, start_program):
        x_s = paddle.static.data('x', [4, 4], x.dtype)
        x_s.stop_gradient = False
        y_s = paddle.matmul(x_s, x_s)
        y_s = paddle.add(x_s, y_s)
        y_s = paddle.tanh(y_s)
    newir_program = ir.translate_to_new_ir(main_program.desc)
    return newir_program


class TestBuildOp(unittest.TestCase):
    def test_build_op(self):
        newir_program = get_ir_program()
        tanh_out = newir_program.block().get_ops()[-1].result(0)
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
        with paddle.ir.core.program_guard(newir_program):
            out = paddle.mean(tanh_out)
        print(newir_program)
        self.assertEqual(out.get_defining_op().name(), "pd.mean")
        self.assertEqual(
            out.get_defining_op()
            .operands()[0]
            .source()
            .get_defining_op()
            .name(),
            "pd.tanh",
        )

    def test_insertion_point(self):
        newir_program = get_ir_program()
        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
        add_op = newir_program.block().get_ops()[-2]
        tanh_op = newir_program.block().get_ops()[-1]
        add_out = add_op.result(0)
        tanh_operand = tanh_op.operands()[0]
        with paddle.ir.core.program_guard(newir_program):
            ir.set_insertion_point(tanh_op)
            out = paddle.mean(add_out)
            tanh_operand.set_source(out)
        print(newir_program)
        self.assertEqual(
            tanh_operand.source().get_defining_op().name(), "pd.mean"
        )


if __name__ == "__main__":
    unittest.main()
@@ -39,17 +39,17 @@ def get_ir_program():
 class TestPybind(unittest.TestCase):
     def test_program(self):
         newir_program = get_ir_program()
-        newir_program.print()
+        print(newir_program)

     def test_block(self):
         newir_program = get_ir_program()
         block = newir_program.block()
         ops = block.get_ops()
-        self.assertTrue(
+        self.assertEqual(
             len(ops), 4
         )  # ir program add "builtin.get_parameter" by default, so size is 4
         block.remove_op(ops[3])
-        self.assertTrue(len(block.get_ops()), 3)
+        self.assertEqual(len(block.get_ops()), 3)

     def test_operation(self):
         newir_program = get_ir_program()
@@ -59,49 +59,49 @@ class TestPybind(unittest.TestCase):
         tanh_op = newir_program.block().get_ops()[3]
         parent_block = tanh_op.get_parent()
         parent_ops_num = len(parent_block.get_ops())
-        self.assertTrue(parent_ops_num, 4)
-        self.assertTrue(tanh_op.num_results(), 1)
-        self.assertTrue(len(matmul_op.get_input_names()), 2)
-        self.assertTrue(len(matmul_op.get_attr_names()), 2)
-        self.assertTrue(len(matmul_op.get_output_names()), 1)
+        self.assertEqual(parent_ops_num, 4)
+        self.assertEqual(tanh_op.num_results(), 1)
+        self.assertEqual(len(matmul_op.get_input_names()), 2)
+        self.assertEqual(len(matmul_op.get_attr_names()), 2)
+        self.assertEqual(len(matmul_op.get_output_names()), 1)

     def test_value(self):
         newir_program = get_ir_program()
         matmul_op = newir_program.block().get_ops()[1]
         add_op = newir_program.block().get_ops()[2]
         tanh_op = newir_program.block().get_ops()[3]
-        self.assertTrue(
+        self.assertEqual(
             matmul_op.results()[0].get_defining_op().name(), "pd.matmul"
         )
-        self.assertTrue(
+        self.assertEqual(
             matmul_op.result(0).get_defining_op().name(), "pd.matmul"
         )
         matmul_op.result(0).set_stop_gradient(True)
-        self.assertTrue(matmul_op.result(0).get_stop_gradient, True)
+        self.assertEqual(matmul_op.result(0).get_stop_gradient(), True)
-        self.assertTrue(
-            tanh_op.operands()[0].source().get_defining_op(), "pd.add"
-        )
+        self.assertEqual(
+            tanh_op.operands()[0].source().get_defining_op().name(), "pd.add"
+        )
         add_op.replace_all_uses_with(matmul_op.results())
-        self.assertTrue(
-            tanh_op.operands()[0].source().get_defining_op(), "pd.matmul"
-        )
+        self.assertEqual(
+            tanh_op.operands()[0].source().get_defining_op().name(), "pd.matmul"
+        )
-        self.assertTrue(add_op.result(0).use_empty(), False)
+        self.assertEqual(add_op.result(0).use_empty(), True)

     def test_type(self):
         newir_program = get_ir_program()
         matmul_op = newir_program.block().get_ops()[1]
         add_op = newir_program.block().get_ops()[2]
-        matmul_op.result(0).type().print()
+        print(matmul_op.result(0).type())
-        self.assertTrue(
+        self.assertEqual(
             matmul_op.result(0).type() == add_op.result(0).type(), True
         )

     def test_utils(self):
         newir_program = get_ir_program()
         matmul_op = newir_program.block().get_ops()[1]
-        print(ir.get_op_result_dtype(matmul_op.result(0)).print())
+        print(ir.get_op_result_dtype(matmul_op.result(0)))
         self.assertEqual(ir.get_op_result_shape(matmul_op.result(0)), [4, 4])
...