diff --git a/paddle/fluid/ir/dialect/pd_api.cc b/paddle/fluid/ir/dialect/pd_api.cc
new file mode 100644
index 0000000000000000000000000000000000000000..65f090c89c1a9f9de668f7c71e8e9b76d4552830
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_api.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/ir/dialect/pd_api.h"
+#include "paddle/fluid/ir/dialect/pd_dialect.h"
+#include "paddle/fluid/ir/dialect/pd_op.h"
+#include "paddle/ir/core/builder.h"
+
+namespace paddle {
+namespace dialect {
+
+ir::OpResult mean(ir::OpResult x, std::vector<int64_t> axis, bool keepdim) {
+  paddle::dialect::MeanOp mean_op =
+      APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::MeanOp>(
+          x, axis, keepdim);
+  return mean_op.result(0);
+}
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_api.h b/paddle/fluid/ir/dialect/pd_api.h
new file mode 100644
index 0000000000000000000000000000000000000000..d18f62ff63c1e32e3fbb16f906f47f748179470c
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_api.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <vector>
+
+#include "paddle/ir/core/value.h"
+
+namespace paddle {
+namespace dialect {
+
+ir::OpResult mean(ir::OpResult x,
+                  std::vector<int64_t> axis = {},
+                  bool keepdim = false);
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_dialect.h b/paddle/fluid/ir/dialect/pd_dialect.h
index a9ca4a497ef21f3c01aa9acc48629cc3a6a0545c..db42b4defdc49060a2d333dcf21ccdafccbe2b37 100644
--- a/paddle/fluid/ir/dialect/pd_dialect.h
+++ b/paddle/fluid/ir/dialect/pd_dialect.h
@@ -16,7 +16,9 @@
 
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/ir/core/dialect.h"
+#include "paddle/ir/core/enforce.h"
 #include "paddle/ir/core/parameter.h"
+#include "paddle/ir/core/program.h"
 
 namespace paddle {
 namespace dialect {
@@ -46,6 +48,53 @@ class PaddleDialect : public ir::Dialect {
   void initialize();
 };
 
+///
+/// \brief APIBuilder is used by the IR API to build ops.
+///
+class APIBuilder {
+ public:
+  static APIBuilder& Instance() {
+    static APIBuilder api_builder;
+    return api_builder;
+  }
+
+  void SetProgram(ir::Program* program) {
+    builder_ = std::make_shared<ir::Builder>(ctx_, program->block());
+  }
+
+  /// Set the insertion point to the specified operation, which will cause
+  /// subsequent insertions to go right before it.
+  void SetInsertionPoint(ir::Operation* op) {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPoint(op);
+  }
+
+  void ResetInsertionPointToStart() {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPointToStart(builder_->block());
+  }
+
+  void ResetInsertionPointToEnd() {
+    IR_ENFORCE(builder_ != nullptr,
+               "builder doesn't hold program, please call SetProgram for "
+               "initialization.");
+    builder_->SetInsertionPointToEnd(builder_->block());
+  }
+
+  std::shared_ptr<ir::Builder> GetBuilder() { return builder_; }
+
+ private:
+  APIBuilder() : builder_(nullptr) {
+    ctx_ = ir::IrContext::Instance();
+    ctx_->GetOrRegisterDialect<PaddleDialect>();
+  }
+
+  ir::IrContext* ctx_;
+  std::shared_ptr<ir::Builder> builder_;
+};
+
 }  // namespace dialect
 }  // namespace paddle
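For orientation (not part of the diff): a minimal sketch of how this singleton is driven from Python through the bindings registered in `paddle/fluid/pybind/ir.cc` further down. `ir.Program()`, `set_global_program`, and the insertion-point helpers are the pybind wrappers this patch adds; everything else is assumed context.

```python
# Sketch only: mirrors APIBuilder::SetProgram -> insertion-point control.
from paddle import ir

program = ir.Program()             # the new Program.__init__ binding
ir.set_global_program(program)     # APIBuilder::Instance().SetProgram(program)
ir.reset_insertion_point_to_end()  # builder now appends to program.block()
# ir.set_insertion_point(op) would make later ops insert right before `op`.
```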
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index 798611db6d3729afe89a01afda7e6676e53b061c..044a592e56701ea24ed1a784735c24f83f4d4f50 100755
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -124,6 +124,7 @@ set(PYBIND_SRCS
     pybind.cc
     imperative.cc
     inference_api.cc
+    static_op_function.cc
    ir.cc
    graph.cc
    bind_fleet_executor.cc
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index ee270042f4176e08c0d9c70a66fbcd05b8845eaf..fbe1c6986aada8a3cba2321378a3edbce5dac6cb 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -10,8 +10,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/pybind/eager_utils.h"
-
 #include <Python.h>
+#include "paddle/ir/core/value.h"
 
 // Avoid a problem with copysign defined in pyconfig.h on Windows.
 #ifdef copysign
 #undef copysign
@@ -49,6 +49,7 @@
 extern PyTypeObject* p_tensor_type;
 extern PyTypeObject* p_string_tensor_type;
 extern PyTypeObject* g_framework_scope_pytype;
+extern PyTypeObject* g_ir_opresult_pytype;
 extern PyTypeObject* g_vartype_pytype;
 extern PyTypeObject* g_place_pytype;
 extern PyTypeObject* g_cudaplace_pytype;
@@ -858,6 +859,12 @@ PyObject* ToPyObject(const phi::DenseTensor* value) {
   return obj.ptr();
 }
 
+PyObject* ToPyObject(const ir::OpResult& value) {
+  auto obj = ::pybind11::cast(value);
+  obj.inc_ref();
+  return obj.ptr();
+}
+
 #ifdef PADDLE_WITH_DISTRIBUTE
 PyObject* ToPyObject(const phi::distributed::DistTensor* value) {
   auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
@@ -1428,6 +1435,21 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
   }
 }
 
+ir::OpResult CastPyArg2OpResult(const std::string& op_type,
+                                PyObject* obj,
+                                size_t arg_pos) {
+  if (PyObject_TypeCheck(obj, g_ir_opresult_pytype)) {
+    return ::pybind11::handle(obj).cast<ir::OpResult>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "OpResult, but got %s",
+        op_type,
+        arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+}
+
 paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index f50ec9395b2f1f2b0fcbdbceee5b722915eabc0b..4e66ba95600dd82258356f71af9b4b5f58574d64 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -29,6 +29,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/ir/core/value.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/int_array.h"
@@ -75,6 +76,9 @@ std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos);
 std::vector<int64_t> CastPyArg2VectorOfInt64(PyObject* obj, size_t arg_pos);
 std::vector<size_t> CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos);
 std::vector<float> CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos);
+ir::OpResult CastPyArg2OpResult(const std::string& op_type,
+                                PyObject* obj,
+                                size_t arg_pos);
 std::vector<std::vector<size_t>> CastPyArg2VectorOfVectorOfSize_t(
     PyObject* obj, size_t arg_pos);
 framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
@@ -128,6 +132,8 @@ PyObject* ToPyObject(const paddle::framework::Vocab& value);
 
 PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);
 
+PyObject* ToPyObject(const ir::OpResult& value);
+
 class PyTensorHook : public egr::TensorHook {
  public:
   explicit PyTensorHook(PyObject* func) : py_func_(func) {
diff --git a/paddle/fluid/pybind/ir.cc b/paddle/fluid/pybind/ir.cc
index d73762f09519c3e8919d4bb802b0435d8ad58bb9..ead5e6889d9632f826218a99c664850f5120e663 100644
--- a/paddle/fluid/pybind/ir.cc
+++ b/paddle/fluid/pybind/ir.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/fluid/pybind/ir.h"
 
+#include <Python.h>
 #include <algorithm>
 #include <memory>
 #include <string>
@@ -21,8 +22,10 @@
 #include <unordered_map>
 #include <unordered_set>
 
+#include "paddle/fluid/ir/dialect/pd_dialect.h"
 #include "paddle/fluid/ir/dialect/pd_type.h"
 #include "paddle/fluid/ir/interface/op_yaml_info.h"
+#include "paddle/fluid/ir_adaptor/translator/translate.h"
 #include "paddle/ir/core/block.h"
 #include "paddle/ir/core/builtin_attribute.h"
 #include "paddle/ir/core/program.h"
@@ -39,26 +42,36 @@
 using ir::OpResult;
 using ir::Program;
 using ir::Type;
 using ir::Value;
+using paddle::dialect::APIBuilder;
 using paddle::dialect::DenseTensorType;
 using pybind11::return_value_policy;
 
 namespace paddle {
 namespace pybind {
+PyTypeObject *g_ir_opresult_pytype = nullptr;
+
+void BindOpsAPI(pybind11::module *module);
+
 void BindProgram(py::module *m) {
   py::class_<Program> program(*m, "Program");
-  program.def("parameters_num", &Program::parameters_num)
+  program
+      .def(
+          "__init__",
+          [](Program &self) { new (&self) Program(ir::IrContext::Instance()); })
+      .def("__str__",
+           [](Program &self) {
+             std::ostringstream print_stream;
+             self.Print(print_stream);
+             return print_stream.str();
+           })
+      .def("parameters_num", &Program::parameters_num)
       .def("block",
            py::overload_cast<>(&Program::block),
           return_value_policy::reference)
       .def("block",
           py::overload_cast<>(&Program::block, py::const_),
-          return_value_policy::reference)
-      .def("print", [](Program &self) {
-        std::ostringstream print_stream;
-        self.Print(print_stream);
-        LOG(INFO) << print_stream.str();
-      });
+          return_value_policy::reference);
 }
 
 void BindBlock(py::module *m) {
@@ -106,7 +119,7 @@ void BindOperation(py::module *m) {
           paddle::dialect::OpYamlInfoInterface yaml_interface =
               self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
           auto inputs_info = std::get<0>(yaml_interface.GetOpInfo());
-          for (auto input_info : inputs_info) {
+          for (auto &input_info : inputs_info) {
             op_list.append(input_info.name);
           }
           return op_list;
@@ -117,7 +130,7 @@
           paddle::dialect::OpYamlInfoInterface yaml_interface =
               self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
           auto attrs_info = std::get<1>(yaml_interface.GetOpInfo());
-          for (auto attr_info : attrs_info) {
+          for (auto &attr_info : attrs_info) {
             op_list.append(attr_info.name);
           }
           return op_list;
@@ -128,7 +141,7 @@
           paddle::dialect::OpYamlInfoInterface yaml_interface =
               self.dyn_cast<paddle::dialect::OpYamlInfoInterface>();
           auto outputs_info = std::get<2>(yaml_interface.GetOpInfo());
-          for (auto output_info : outputs_info) {
+          for (auto &output_info : outputs_info) {
             op_list.append(output_info.name);
           }
           return op_list;
@@ -147,12 +160,17 @@ void BindValue(py::module *m) {
 
 void BindOpOperand(py::module *m) {
   py::class_<OpOperand> op_operand(*m, "OpOperand");
-  op_operand.def("source", &OpOperand::source)
-      .def("set_source", &OpOperand::set_source);
+  op_operand
+      .def("source",
+           [](OpOperand &self) { return self.source().dyn_cast<OpResult>(); })
+      .def("set_source", [](OpOperand &self, const OpResult &result) {
+        self.set_source(result);
+      });
 }
 
 void BindOpResult(py::module *m) {
   py::class_<OpResult> op_result(*m, "OpResult");
+  g_ir_opresult_pytype = reinterpret_cast<PyTypeObject *>(op_result.ptr());
   op_result
       .def("get_defining_op",
            &OpResult::GetDefiningOp,
@@ -197,7 +215,11 @@
 void BindType(py::module *m) {
   py::class_<Type> ir_type(*m, "Type");
   ir_type.def("__eq__", [](Type &self, Type &other) { return self == other; })
-      .def("print", [](Type &self) { LOG(INFO) << self; });
+      .def("__str__", [](Type &self) {
+        std::ostringstream print_stream;
+        print_stream << self;
+        return print_stream.str();
+      });
 }
 
 void BindUtils(pybind11::module *m) {
@@ -220,17 +242,29 @@ void BindUtils(pybind11::module *m) {
               "DenseTensorType"));
         }
       });
+  m->def("set_global_program",
+         [](Program *program) { APIBuilder::Instance().SetProgram(program); });
+  m->def("set_insertion_point",
+         [](Operation *op) { APIBuilder::Instance().SetInsertionPoint(op); });
+  m->def("reset_insertion_point_to_start",
+         []() { APIBuilder::Instance().ResetInsertionPointToStart(); });
+  m->def("reset_insertion_point_to_end",
+         []() { APIBuilder::Instance().ResetInsertionPointToEnd(); });
+  m->def("translate_to_new_ir", &paddle::TranslateLegacyProgramToProgram);
 }
 
-void BindNewIR(pybind11::module *m) {
-  BindProgram(m);
-  BindBlock(m);
-  BindOperation(m);
-  BindValue(m);
-  BindOpOperand(m);
-  BindOpResult(m);
-  BindType(m);
-  BindUtils(m);
+void BindNewIR(pybind11::module *module) {
+  auto ir_module = module->def_submodule("ir");
+  BindProgram(&ir_module);
+  BindBlock(&ir_module);
+  BindOperation(&ir_module);
+  BindValue(&ir_module);
+  BindOpOperand(&ir_module);
+  BindOpResult(&ir_module);
+  BindType(&ir_module);
+  BindUtils(&ir_module);
+  auto ops_modules = ir_module.def_submodule("ops");
+  BindOpsAPI(&ops_modules);
 }
 
 }  // namespace pybind
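With `BindNewIR` now hanging everything off a `core.ir` submodule (and `core.ir.ops` for the op entry points), the rebound symbols are reachable as sketched below; a hedged sketch, assuming `paddle.fluid.core` re-exports the compiled `libpaddle` module as usual:

```python
# Sketch: where the symbols live after the submodule split.
from paddle.fluid import core

program = core.ir.Program()   # Program is now constructible from Python
print(program)                # __str__ replaces the removed .print() method
print(dir(core.ir.ops))       # op functions such as 'mean' are bound here
```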
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index f5c32880d238693fe15f1859595a73fd0693c8af..5b7bd9e579a4af6a9f9dd4ee48b6874b7527dc9f 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -195,7 +195,6 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/nan_inf_utils.h"
 #include "paddle/fluid/imperative/layout_autotune.h"
-#include "paddle/fluid/ir_adaptor/translator/translate.h"
 #include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h"
 #include "paddle/fluid/prim/utils/static/static_tensor_operants.h"
 #include "paddle/fluid/pybind/eager_utils.h"
@@ -2748,7 +2747,6 @@ All parameter, weight, gradient are variables in Paddle.
   // Add skipped op list
   m.def("set_skipped_op_list",
         [](const std::string &op_list) { egr::SetSkipOpList(op_list); });
-  m.def("translate_to_new_ir", &paddle::TranslateLegacyProgramToProgram);
   BindFleetWrapper(&m);
   BindIO(&m);
   BindParallelExecutor(m);
diff --git a/paddle/fluid/pybind/static_op_function.cc b/paddle/fluid/pybind/static_op_function.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d19d163a5f1766ea0129a4e2f252c82c894745ae
--- /dev/null
+++ b/paddle/fluid/pybind/static_op_function.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <Python.h>
+#include "paddle/fluid/ir/dialect/pd_api.h"
+#include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/fluid/pybind/exception.h"
+#include "paddle/fluid/pybind/op_function_common.h"
+#include "paddle/phi/common/int_array.h"
+#include "paddle/phi/core/enforce.h"
+
+namespace paddle {
+namespace pybind {
+
+PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs) {
+  try {
+    VLOG(6) << "Add mean op into program";
+    VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);
+    // Get OpResult from args
+    PyObject *x_obj = PyTuple_GET_ITEM(args, 0);
+    auto x = CastPyArg2OpResult("mean", x_obj, 0);
+
+    // Parse Attributes if needed
+    PyObject *axis_obj = PyTuple_GET_ITEM(args, 1);
+    paddle::experimental::IntArray axis =
+        CastPyArg2IntArray(axis_obj, "mean", 1);
+    PyObject *keepdim_obj = PyTuple_GET_ITEM(args, 2);
+    bool keepdim = CastPyArg2Boolean(keepdim_obj, "mean", 2);
+
+    // Call ir static api
+    auto out = paddle::dialect::mean(x, axis.GetData(), keepdim);
+    return ToPyObject(out);
+  } catch (...) {
+    ThrowExceptionToPython(std::current_exception());
+    return nullptr;
+  }
+}
+
+static PyObject *mean(PyObject *self, PyObject *args, PyObject *kwargs) {
+  return static_api_mean(self, args, kwargs);
+}
+
+static PyMethodDef OpsAPI[] = {{"mean",
+                                (PyCFunction)(void (*)(void))mean,
+                                METH_VARARGS | METH_KEYWORDS,
+                                "C++ interface function for mean."},
+                               {nullptr, nullptr, 0, nullptr}};
+
+void BindOpsAPI(pybind11::module *module) {
+  if (PyModule_AddFunctions(module->ptr(), OpsAPI) < 0) {
+    PADDLE_THROW(phi::errors::Fatal("Add C++ api to core.ops failed!"));
+  }
+}
+
+}  // namespace pybind
+}  // namespace paddle
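The handler parses its arguments positionally as `(x, axis, keepdim)`, so the raw binding is invoked as below; a sketch only, assuming `x` is an `ir.OpResult` that already lives in the program registered with `APIBuilder`:

```python
# Sketch: direct call into the raw core.ir.ops binding.
from paddle.fluid import core

out = core.ir.ops.mean(x, [0], False)  # x: ir.OpResult (assumed to exist)
print(out.get_defining_op().name())    # -> "pd.mean"
```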
diff --git a/paddle/phi/core/flags.cc b/paddle/phi/core/flags.cc
index 0c581fb09919fce28a9924938d29035e52e4721e..a0e92dfa335cb8db243b29c5ae84316b1d2072a0 100644
--- a/paddle/phi/core/flags.cc
+++ b/paddle/phi/core/flags.cc
@@ -1268,3 +1268,15 @@ PHI_DEFINE_EXPORTED_string(tensor_operants_mode,
 PHI_DEFINE_EXPORTED_bool(enable_new_ir_in_executor,
                          false,
                          "Enable new IR in executor");
+
+/**
+ * Using new IR API in Python
+ * Name: enable_new_ir_api
+ * Since Version: 2.6.0
+ * Value Range: bool, default=false
+ * Example:
+ * Note: If True, the new IR API will be used in Python
+ */
+PHI_DEFINE_EXPORTED_bool(enable_new_ir_api,
+                         false,
+                         "Enable new IR API in Python");
diff --git a/python/paddle/_ir_ops.py b/python/paddle/_ir_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..1781664dd6cb9116777e08ec42aca313c96a4ecc
--- /dev/null
+++ b/python/paddle/_ir_ops.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle.fluid import core
+
+__all__ = []
+
+for name in dir(core.ir.ops):
+    globals()[name] = getattr(core.ir.ops, name)
+    __all__.append(name)
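Since `_ir_ops` re-exports whatever `core.ir.ops` contains at import time, flipping the flag is all that is needed for static-graph APIs to route into the new IR path (see the `stat.py` change below). A minimal sketch:

```python
import paddle

paddle.enable_static()
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
# paddle.ir.core._use_new_ir_api() now returns True, so paddle.mean and
# friends dispatch to paddle._ir_ops.mean instead of appending legacy ops.
```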
diff --git a/python/paddle/ir/__init__.py b/python/paddle/ir/__init__.py
index 1d26a81e47524b709911daadf434e5d8fca9cad1..2ce712f088c2f7ed852493376db08c17d890fdaa 100755
--- a/python/paddle/ir/__init__.py
+++ b/python/paddle/ir/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle.fluid.core import (
+from paddle.fluid.libpaddle.ir import (
     Program,
     Block,
     Operation,
@@ -21,12 +21,18 @@
     OpResult,
     Type,
 )  # noqa: F401
-from paddle.fluid.core import (
+from paddle.fluid.libpaddle.ir import (
     get_op_result_shape,
     get_op_result_dtype,
     translate_to_new_ir,
+    set_global_program,
+    set_insertion_point,
+    reset_insertion_point_to_start,
+    reset_insertion_point_to_end,
 )  # noqa: F401
 
+from . import core
+
 __all__ = [  # noqa
     'Program',
     'Block',
diff --git a/python/paddle/ir/core.py b/python/paddle/ir/core.py
new file mode 100755
index 0000000000000000000000000000000000000000..9310c9b75bf6562bc748587580f56544a76b0d0e
--- /dev/null
+++ b/python/paddle/ir/core.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import paddle
+from paddle.fluid.libpaddle.ir import Program, set_global_program
+
+from ..fluid.wrapped_decorator import signature_safe_contextmanager
+
+
+def _use_new_ir_api():
+    """
+    Checks whether paddle uses the new IR API.
+
+    Returns:
+        bool: Whether paddle uses the new IR API.
+
+    """
+    if paddle.framework.get_flags("FLAGS_enable_new_ir_api")[
+        'FLAGS_enable_new_ir_api'
+    ]:
+        return True
+    else:
+        return False
+
+
+# The main program is a global instance.
+_main_program_ = Program()
+# Set the global program for C++; ops built from Python will be inserted into it.
+set_global_program(_main_program_)
+
+_startup_program_ = Program()
+
+
+def default_startup_program():
+    """
+    Get the default/global startup program.
+
+    The :code:`paddle.nn` functions will append the initialization operators to the
+    startup program. The :code:`startup_program` will initialize the parameters by
+    the OPs.
+
+    This method will return the default or the current startup program. Users can use
+    :ref:`api_paddle_ir_core_program_guard` to switch :ref:`api_paddle_ir_Program` .
+
+    Returns:
+        Program: current default startup program.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            paddle.enable_static()
+            x = paddle.static.data(name="x", shape=[-1, 784], dtype='float32')
+            out = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")
+            print("main program is: {}".format(paddle.static.default_main_program()))
+            print("start up program is: {}".format(paddle.static.default_startup_program()))
+    """
+    return _startup_program_
+
+
+def default_main_program():
+    """
+    This API can be used to get the ``default main program``, which stores the
+    descriptions of ops and tensors.
+
+    For example, ``z = paddle.add(x, y)`` will create a new ``add``
+    op and a new ``z`` tensor, and they will be recorded in the ``default main program`` .
+
+    The ``default main program`` is the default value for the ``Program`` parameter in
+    a lot of APIs. For example, :code:`Executor.run()` will execute the
+    :code:`default_main_program` when no program is specified.
+
+    If you want to switch the ``default main program``, you can use :ref:`api_paddle_ir_core_program_guard` .
+
+    Returns:
+        Program: A ``Program`` which holds the descriptions of ops and tensors in the network.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            paddle.enable_static()
+            # Sample Network:
+            x = paddle.static.data(name='x', shape=[100, 100], dtype='float32')
+            y = paddle.static.data(name='y', shape=[100, 100], dtype='float32')
+            out = paddle.add(x, y)
+
+            # print the number of blocks in the program, 1 in this case
+            print(paddle.static.default_main_program().num_blocks)  # 1
+            # print the default_main_program
+            print(paddle.static.default_main_program())
+    """
+    return _main_program_
+
+
+def switch_main_program(program):
+    """
+    Switch the main program to a new program.
+
+    Args:
+        program(Program): The new main program
+
+    Returns:
+        Program: The previous main program
+    """
+    global _main_program_
+    prev_program = _main_program_
+    _main_program_ = program
+    set_global_program(_main_program_)
+    return prev_program
+
+
+def switch_startup_program(program):
+    """
+    Switch the startup program to a new program.
+
+    Args:
+        program(Program): The new startup program
+
+    Returns:
+        Program: The previous startup program
+    """
+    global _startup_program_
+    prev_program = _startup_program_
+    _startup_program_ = program
+    return prev_program
+
+
+@signature_safe_contextmanager
+def program_guard(main_program, startup_program=None):
+    """
+    :api_attr: Static Graph
+
+    Change the global main program and startup program with the ``with`` statement.
+    Layer functions in the Python ``with`` block will append operators and
+    Tensors to the new main program.
+
+    Args:
+        main_program(Program): New main program inside ``with`` statement.
+        startup_program(Program, optional): New startup program inside ``with``
+            statement. :code:`None` means not changing the startup program;
+            default_startup_program is still used.
+            Default: None.
+
+    Examples:
+        .. code-block:: python
+            :name: code-example-1
+
+            import paddle
+
+            paddle.enable_static()
+            main_program = paddle.static.Program()
+            startup_program = paddle.static.Program()
+            with paddle.static.program_guard(main_program, startup_program):
+                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')
+
+    Notes: The temporary :code:`Program` can be used if the user does not need
+    to construct either a startup program or a main program.
+
+    Examples:
code-block:: python + :name: code-example-2 + + import paddle + + paddle.enable_static() + main_program = paddle.static.Program() + # does not care about startup program. Just pass a temporary value. + with paddle.static.program_guard(main_program, paddle.static.Program()): + data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + + """ + from ..fluid.data_feeder import check_type + + check_type( + main_program, 'main_program', Program, 'paddle.static.program_guard' + ) + main_program = switch_main_program(main_program) + if startup_program is not None: + check_type( + startup_program, + 'startup_program', + Program, + 'paddle.static.program_guard', + ) + startup_program = switch_startup_program(startup_program) + try: + yield + finally: + switch_main_program(main_program) + if startup_program is not None: + switch_startup_program(startup_program) diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index 94b91db295cb90a927724d4b1c1d5f598a5f08a2..5635554e69b566214dc56a5d8b4b54857a92aa62 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os + from . import amp # noqa: F401 from . import nn # noqa: F401 @@ -55,9 +57,8 @@ from ..fluid.compiler import ExecutionStrategy # noqa: F401 from ..fluid.framework import default_main_program # noqa: F401 from ..fluid.framework import default_startup_program # noqa: F401 from ..fluid.framework import device_guard # noqa: F401 -from ..fluid.framework import Program # noqa: F401 + from ..fluid.framework import name_scope # noqa: F401 -from ..fluid.framework import program_guard # noqa: F401 from ..fluid.framework import cpu_places # noqa: F401 from ..fluid.framework import cuda_places # noqa: F401 from ..fluid.framework import xpu_places # noqa: F401 @@ -79,6 +80,15 @@ from .nn.metric import auc # noqa: F401 from .nn.metric import accuracy # noqa: F401 from .nn.metric import ctr_metric_bundle # noqa: F401 +import paddle + +if paddle.ir.core._use_new_ir_api(): + from ..ir import Program # noqa: F401 + from ..ir import program_guard # noqa: F401 +else: + from ..fluid.framework import program_guard # noqa: F401 + from ..fluid.framework import Program # noqa: F401 + __all__ = [ # noqa 'append_backward', 'gradients', diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 004db97089f725bbfd957774992b1c53b0fddb8f..69789dbafd4299acc590853c6070eef73944afa2 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -15,7 +15,7 @@ # TODO: define statistical functions of a tensor import paddle -from paddle import _C_ops +from paddle import _C_ops, _ir_ops, ir from paddle.framework import in_dynamic_mode from ..common_ops_import import Variable @@ -82,6 +82,8 @@ def mean(x, axis=None, keepdim=False, name=None): if in_dynamic_mode(): return _C_ops.mean(x, axis, keepdim) else: + if ir.core._use_new_ir_api(): + return _ir_ops.mean(x, axis, keepdim) reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) check_variable_and_dtype( x, diff --git a/python/setup.py.in b/python/setup.py.in index 0bab54bf227634bf20306a70d0212f53c1acc7c4..d1a6388a97627dbf769f2b6908690e7dcb2101fc 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -498,6 +498,7 @@ packages=['paddle', 'paddle.geometric', 'paddle.geometric.message_passing', 'paddle.geometric.sampling', + 'paddle.ir', ] with 
diff --git a/python/setup.py.in b/python/setup.py.in
index 0bab54bf227634bf20306a70d0212f53c1acc7c4..d1a6388a97627dbf769f2b6908690e7dcb2101fc 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -498,6 +498,7 @@ packages=['paddle',
           'paddle.geometric',
           'paddle.geometric.message_passing',
           'paddle.geometric.sampling',
+          'paddle.ir',
           ]
 
 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
diff --git a/setup.py b/setup.py
index 06ae9c99322d54a1564825759cd2501e5e52d124..dda195d8aadeaba72c1fd08db2d9d9f50d8e8f25 100644
--- a/setup.py
+++ b/setup.py
@@ -1496,6 +1496,7 @@ def get_setup_parameters():
         'paddle.geometric',
         'paddle.geometric.message_passing',
         'paddle.geometric.sampling',
+        'paddle.ir',
     ]
     paddle_bins = ''
diff --git a/test/ir/new_ir/test_build_op.py b/test/ir/new_ir/test_build_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8bab3ac6c0adf86000abe5ce66b22bc79a9c401
--- /dev/null
+++ b/test/ir/new_ir/test_build_op.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import paddle
+from paddle import ir
+
+paddle.enable_static()
+
+
+def get_ir_program():
+    x = paddle.randn([4, 4])
+    main_program, start_program = (
+        paddle.static.Program(),
+        paddle.static.Program(),
+    )
+    with paddle.static.program_guard(main_program, start_program):
+        x_s = paddle.static.data('x', [4, 4], x.dtype)
+        x_s.stop_gradient = False
+        y_s = paddle.matmul(x_s, x_s)
+        y_s = paddle.add(x_s, y_s)
+        y_s = paddle.tanh(y_s)
+    newir_program = ir.translate_to_new_ir(main_program.desc)
+    return newir_program
+
+
+class TestBuildOp(unittest.TestCase):
+    def test_build_op(self):
+        newir_program = get_ir_program()
+        tanh_out = newir_program.block().get_ops()[-1].result(0)
+        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
+        with paddle.ir.core.program_guard(newir_program):
+            out = paddle.mean(tanh_out)
+            print(newir_program)
+            self.assertEqual(out.get_defining_op().name(), "pd.mean")
+            self.assertEqual(
+                out.get_defining_op()
+                .operands()[0]
+                .source()
+                .get_defining_op()
+                .name(),
+                "pd.tanh",
+            )
+
+    def test_insertion_point(self):
+        newir_program = get_ir_program()
+        paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
+        add_op = newir_program.block().get_ops()[-2]
+        tanh_op = newir_program.block().get_ops()[-1]
+        add_out = add_op.result(0)
+        tanh_operand = tanh_op.operands()[0]
+
+        with paddle.ir.core.program_guard(newir_program):
+            ir.set_insertion_point(tanh_op)
+            out = paddle.mean(add_out)
+            tanh_operand.set_source(out)
+
+        print(newir_program)
+        self.assertEqual(
+            tanh_operand.source().get_defining_op().name(), "pd.mean"
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
"builtin.get_parameter" by default, so size is 4 block.remove_op(ops[3]) - self.assertTrue(len(block.get_ops()), 3) + self.assertEqual(len(block.get_ops()), 3) def test_operation(self): newir_program = get_ir_program() @@ -59,49 +59,49 @@ class TestPybind(unittest.TestCase): tanh_op = newir_program.block().get_ops()[3] parent_block = tanh_op.get_parent() parent_ops_num = len(parent_block.get_ops()) - self.assertTrue(parent_ops_num, 4) - self.assertTrue(tanh_op.num_results(), 1) - self.assertTrue(len(matmul_op.get_input_names()), 2) - self.assertTrue(len(matmul_op.get_attr_names()), 2) - self.assertTrue(len(matmul_op.get_output_names()), 1) + self.assertEqual(parent_ops_num, 4) + self.assertEqual(tanh_op.num_results(), 1) + self.assertEqual(len(matmul_op.get_input_names()), 2) + self.assertEqual(len(matmul_op.get_attr_names()), 2) + self.assertEqual(len(matmul_op.get_output_names()), 1) def test_value(self): newir_program = get_ir_program() matmul_op = newir_program.block().get_ops()[1] add_op = newir_program.block().get_ops()[2] tanh_op = newir_program.block().get_ops()[3] - self.assertTrue( + self.assertEqual( matmul_op.results()[0].get_defining_op().name(), "pd.matmul" ) - self.assertTrue( + self.assertEqual( matmul_op.result(0).get_defining_op().name(), "pd.matmul" ) matmul_op.result(0).set_stop_gradient(True) - self.assertTrue(matmul_op.result(0).get_stop_gradient, True) + self.assertEqual(matmul_op.result(0).get_stop_gradient(), True) - self.assertTrue( - tanh_op.operands()[0].source().get_defining_op(), "pd.add" + self.assertEqual( + tanh_op.operands()[0].source().get_defining_op().name(), "pd.add" ) add_op.replace_all_uses_with(matmul_op.results()) - self.assertTrue( - tanh_op.operands()[0].source().get_defining_op(), "pd.matmul" + self.assertEqual( + tanh_op.operands()[0].source().get_defining_op().name(), "pd.matmul" ) - self.assertTrue(add_op.result(0).use_empty(), False) + self.assertEqual(add_op.result(0).use_empty(), True) def test_type(self): newir_program = get_ir_program() matmul_op = newir_program.block().get_ops()[1] add_op = newir_program.block().get_ops()[2] - matmul_op.result(0).type().print() - self.assertTrue( + print(matmul_op.result(0).type()) + self.assertEqual( matmul_op.result(0).type() == add_op.result(0).type(), True ) def test_utils(self): newir_program = get_ir_program() matmul_op = newir_program.block().get_ops()[1] - print(ir.get_op_result_dtype(matmul_op.result(0)).print()) + print(ir.get_op_result_dtype(matmul_op.result(0))) self.assertEqual(ir.get_op_result_shape(matmul_op.result(0)), [4, 4])