Unverified commit 02e6347d, authored by YuanRisheng and committed by GitHub

[New IR]Add attrs Interface for Python (#55974)

* add attrs and dtype interface

* fix compile bugs

* fix some bugs

* fix windows bugs
Parent 6b10c0e5
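Taken together, the changes below extend the new-IR Python surface: `Operation` gains an `attrs()` method that returns op attributes as a Python dict, `OpResult` gains `shape` / `dtype` / `stop_gradient` properties, `phi::DataType` is exported to Python as `core.DataType`, and `sum` / `divide` / `full` join `mean` in `paddle._ir_ops`. A minimal usage sketch (not part of this commit; it assumes a build with `FLAGS_enable_new_ir_api` switched on, mirroring the updated unit tests at the bottom of the diff):

```python
import paddle

paddle.enable_static()
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})

# Ops built through the regular paddle API are routed to paddle._ir_ops
# and produce new-IR OpResult objects.
main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    full_out = paddle.tensor.fill_constant(shape=[4, 4], dtype="float32", value=2)
    out = paddle.sum(paddle.divide(full_out, full_out))

    print(out.shape, out.dtype)       # shape/dtype are now properties
    out.stop_gradient = True          # replaces set_stop_gradient(True)

    full_op = full_out.get_defining_op()
    print(full_op.name())             # "pd.full"
    print(full_op.attrs()["dtype"])   # DataType.FLOAT32

paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})
```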
@@ -23,7 +23,34 @@ ir::OpResult mean(ir::OpResult x, std::vector<int64_t> axis, bool keepdim) {
paddle::dialect::MeanOp mean_op =
APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::MeanOp>(
x, axis, keepdim);
- return mean_op.result(0);
+ return mean_op.out();
}
ir::OpResult sum(ir::OpResult x,
std::vector<int64_t> axis,
phi::DataType dtype,
bool keepdim) {
paddle::dialect::SumOp sum_op =
APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::SumOp>(
x, axis, dtype, keepdim);
return sum_op.out();
}
ir::OpResult divide(ir::OpResult x, ir::OpResult y) {
paddle::dialect::DivideOp divide_op =
APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::DivideOp>(x,
y);
return divide_op.out();
}
ir::OpResult full(std::vector<int64_t> shape,
float value,
phi::DataType dtype,
phi::Place place) {
paddle::dialect::FullOp full_op =
APIBuilder::Instance().GetBuilder()->Build<paddle::dialect::FullOp>(
shape, value, dtype, place);
return full_op.out();
}
} // namespace dialect
...
@@ -17,6 +17,8 @@
#include <vector>
#include "paddle/ir/core/value.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/place.h"
namespace paddle {
namespace dialect {
@@ -25,5 +27,17 @@ ir::OpResult mean(ir::OpResult x,
std::vector<int64_t> axis = {},
bool keepdim = false);
ir::OpResult sum(ir::OpResult x,
std::vector<int64_t> axis = {},
phi::DataType dtype = phi::DataType::UNDEFINED,
bool keepdim = false);
ir::OpResult divide(ir::OpResult x, ir::OpResult y);
ir::OpResult full(std::vector<int64_t> shape,
float value,
phi::DataType dtype = phi::DataType::FLOAT32,
phi::Place place = phi::CPUPlace());
} // namespace dialect
} // namespace paddle
@@ -16,13 +16,35 @@
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/pd_type_storage.h" #include "paddle/fluid/ir/dialect/pd_type_storage.h"
#include "paddle/ir/core/builtin_attribute.h" #include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_type.h" #include "paddle/ir/core/builtin_type.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h" #include "paddle/phi/common/scalar.h"
namespace paddle { namespace paddle {
namespace dialect { namespace dialect {
using VariantType = paddle::variant<bool,
int,
int64_t,
float,
double,
std::string,
std::vector<bool>,
std::vector<int>,
std::vector<int64_t>,
std::vector<float>,
std::vector<double>,
std::vector<std::string>,
phi::Scalar,
std::vector<phi::Scalar>,
phi::IntArray,
phi::DataType,
phi::DataLayout,
phi::Place>;
// TODO(zhangbo): The builtin type needs to cover all data types of
// phi::DataType.
static inline phi::DataType TransToPhiDataType(ir::Type dtype) {
@@ -58,7 +80,7 @@ static inline phi::DataType TransToPhiDataType(ir::Type dtype) {
}
static inline ir::Type TransToIrDataType(phi::DataType dtype,
- ir::IrContext *ctx = nullptr) {
+ ir::IrContext* ctx = nullptr) {
if (ctx == nullptr) {
ctx = ir::IrContext::Instance();
}
@@ -96,7 +118,7 @@ static inline ir::Type TransToIrDataType(phi::DataType dtype,
}
static inline ir::Attribute TransToIrAttribute(phi::Scalar scalar,
- ir::IrContext *ctx = nullptr) {
+ ir::IrContext* ctx = nullptr) {
if (ctx == nullptr) {
ctx = ir::IrContext::Instance();
}
@@ -119,5 +141,155 @@ static inline ir::Attribute TransToIrAttribute(phi::Scalar scalar,
}
}
enum class AttrType {
UNDEFINED = 0,
BOOL,
INT32,
INT64,
FLOAT,
DOUBLE,
ARRAY,
INT_ARRAY,
SCALAR,
DATA_TYPE,
DATA_LAYOUT,
PLACE,
STRING,
NUM_ATTR_TYPES,
};
static inline AttrType GetAttributeType(const ir::Attribute& attr) {
if (attr.isa<ir::BoolAttribute>()) {
return AttrType::BOOL;
} else if (attr.isa<ir::FloatAttribute>()) {
return AttrType::FLOAT;
} else if (attr.isa<ir::DoubleAttribute>()) {
return AttrType::DOUBLE;
} else if (attr.isa<ir::Int32Attribute>()) {
return AttrType::INT32;
} else if (attr.isa<ir::Int64Attribute>()) {
return AttrType::INT64;
} else if (attr.isa<ir::ArrayAttribute>()) {
return AttrType::ARRAY;
} else if (attr.isa<ir::StrAttribute>()) {
return AttrType::STRING;
} else if (attr.isa<paddle::dialect::IntArrayAttribute>()) {
return AttrType::INT_ARRAY;
} else if (attr.isa<paddle::dialect::DataTypeAttribute>()) {
return AttrType::DATA_TYPE;
} else if (attr.isa<paddle::dialect::PlaceAttribute>()) {
return AttrType::PLACE;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported ir Attribute type when casting it into "
"AttrType."));
}
}
static std::unordered_map<AttrType,
std::function<VariantType(const ir::Attribute& attr)>>
attr_cast_map = {
{AttrType::BOOL,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::BoolAttribute>().data()};
}},
{AttrType::FLOAT,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::FloatAttribute>().data()};
}},
{AttrType::DOUBLE,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::DoubleAttribute>().data()};
}},
{AttrType::INT32,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::Int32Attribute>().data()};
}},
{AttrType::INT64,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::Int64Attribute>().data()};
}},
{AttrType::INT_ARRAY,
[](const ir::Attribute& attr) {
return VariantType{
attr.dyn_cast<paddle::dialect::IntArrayAttribute>()
.data()
.GetData()};
}},
{AttrType::STRING,
[](const ir::Attribute& attr) {
return VariantType{attr.dyn_cast<ir::StrAttribute>().AsString()};
}},
{AttrType::DATA_TYPE,
[](const ir::Attribute& attr) {
return VariantType{
attr.dyn_cast<paddle::dialect::DataTypeAttribute>().data()};
}},
{AttrType::PLACE,
[](const ir::Attribute& attr) {
return VariantType{
attr.dyn_cast<paddle::dialect::PlaceAttribute>().data()};
}},
{AttrType::ARRAY,
[](const ir::Attribute& attr) {
auto attr_vec = attr.dyn_cast<ir::ArrayAttribute>().AsVector();
if (attr_vec.size() == 0) {
return VariantType{std::vector<int>()};
}
AttrType element_type = GetAttributeType(attr_vec[0]);
if (element_type == AttrType::BOOL) {
std::vector<bool> vec_bools;
for (auto vec_element : attr_vec) {
vec_bools.push_back(
vec_element.dyn_cast<ir::BoolAttribute>().data());
}
return VariantType{vec_bools};
} else if (element_type == AttrType::INT32) {
std::vector<int> vec_int32;
for (auto vec_element : attr_vec) {
vec_int32.push_back(
vec_element.dyn_cast<ir::Int32Attribute>().data());
}
return VariantType{vec_int32};
} else if (element_type == AttrType::INT64) {
std::vector<int64_t> vec_int64;
for (auto vec_element : attr_vec) {
vec_int64.push_back(
vec_element.dyn_cast<ir::Int64Attribute>().data());
}
return VariantType{vec_int64};
} else if (element_type == AttrType::FLOAT) {
std::vector<float> vec_float;
for (auto vec_element : attr_vec) {
vec_float.push_back(
vec_element.dyn_cast<ir::FloatAttribute>().data());
}
return VariantType{vec_float};
} else if (element_type == AttrType::DOUBLE) {
std::vector<double> vec_double;
for (auto vec_element : attr_vec) {
vec_double.push_back(
vec_element.dyn_cast<ir::DoubleAttribute>().data());
}
return VariantType{vec_double};
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported ir Attribute type when casting it into "
"vector."));
}
}},
};
static inline VariantType GetAttributeData(const ir::Attribute& attr) {
AttrType attr_type = GetAttributeType(attr);
return attr_cast_map[attr_type](attr);
}
} // namespace dialect
} // namespace paddle
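The `VariantType` / `GetAttributeData` helpers above are what back the `Operation.attrs()` binding added to `paddle/fluid/pybind/ir.cc` further down: each `ir::Attribute` is converted into an ordinary Python value. A rough sketch of what that looks like from Python, modeled on the `test_attr` case at the end of this diff (the op index is illustrative and depends on what the translator emits):

```python
import paddle
from paddle import ir

paddle.enable_static()
main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    paddle.tensor.fill_constant(shape=[4, 4], dtype="float32", value=2)

newir_program = ir.translate_to_new_ir(main_program.desc)
# Attributes come back as plain Python types: StrAttribute -> str,
# ArrayAttribute -> list, DataTypeAttribute -> core.DataType,
# PlaceAttribute -> core.Place, and so on.
full_attrs = newir_program.block().get_ops()[-1].attrs()
assert full_attrs["dtype"] == paddle.fluid.core.DataType.FLOAT32
assert isinstance(full_attrs["place"], paddle.fluid.core.Place)
```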
@@ -124,6 +124,7 @@ set(PYBIND_SRCS
pybind.cc
imperative.cc
inference_api.cc
ops_api.cc
static_op_function.cc
ir.cc
graph.cc
...
@@ -51,6 +51,7 @@ extern PyTypeObject* p_string_tensor_type;
extern PyTypeObject* g_framework_scope_pytype;
extern PyTypeObject* g_ir_opresult_pytype;
extern PyTypeObject* g_vartype_pytype;
extern PyTypeObject* g_data_type_pytype;
extern PyTypeObject* g_place_pytype;
extern PyTypeObject* g_cudaplace_pytype;
extern PyTypeObject* g_cpuplace_pytype;
@@ -644,6 +645,24 @@ paddle::framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
return dtype;
}
paddle::DataType CastPyArg2DataTypeDirectly(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos) {
paddle::DataType dtype;
if (PyObject_TypeCheck(obj, g_data_type_pytype)) {
dtype = ::pybind11::handle(obj).cast<paddle::DataType>();
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s: argument (position %d) must be "
"one of core.VarDesc.VarType, "
"but got %s",
op_type,
arg_pos + 1,
reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
return dtype;
}
paddle::framework::Vocab CastPyArg2Vocab(PyObject* obj, ssize_t arg_pos) {
if (PyDict_Check(obj)) {
paddle::framework::Vocab vocab;
...
@@ -306,6 +306,10 @@ paddle::DataType CastPyArg2DataType(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
paddle::DataType CastPyArg2DataTypeDirectly(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
#ifdef PADDLE_WITH_DISTRIBUTE
std::shared_ptr<phi::distributed::auto_parallel::TensorDistAttr>
CastPyArg2DistAttr(PyObject* obj, ssize_t arg_pos);
...
@@ -22,8 +22,11 @@
#include <unordered_set>
#include <utility>
#include "paddle/fluid/pybind/pybind_variant_caster.h"
#include "paddle/fluid/ir/dialect/pd_dialect.h" #include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_type.h" #include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h" #include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/fluid/ir_adaptor/translator/translate.h" #include "paddle/fluid/ir_adaptor/translator/translate.h"
#include "paddle/ir/core/block.h" #include "paddle/ir/core/block.h"
@@ -123,6 +126,15 @@ void BindOperation(py::module *m) {
}
return op_list;
})
.def("attrs",
[](Operation &self) -> py::dict {
py::dict attrs_dict;
for (auto &pair : self.attributes()) {
attrs_dict[pair.first.c_str()] =
paddle::dialect::GetAttributeData(pair.second);
}
return attrs_dict;
})
.def("operands_source", .def("operands_source",
[](Operation &self) -> py::list { [](Operation &self) -> py::list {
py::list op_list; py::list op_list;
...@@ -195,17 +207,21 @@ void BindOpOperand(py::module *m) { ...@@ -195,17 +207,21 @@ void BindOpOperand(py::module *m) {
}); });
} }
- void BindOpResult(py::module *m) {
- py::class_<OpResult> op_result(*m, "OpResult");
- g_ir_opresult_pytype = reinterpret_cast<PyTypeObject *>(op_result.ptr());
- op_result
- .def("get_defining_op",
- &OpResult::GetDefiningOp,
- return_value_policy::reference)
- .def("use_empty", &OpResult::use_empty)
- .def("type", &OpResult::type)
- .def("set_stop_gradient",
- [](OpResult &self, bool stop_gradient) {
+ bool GetStopGradient(const OpResult &self) {
+ auto *defining_op = self.owner();
+ if (defining_op->HasAttribute(kAttrStopGradients)) {
+ auto stop_gradients = defining_op->attribute(kAttrStopGradients)
+ .dyn_cast<ir::ArrayAttribute>()
+ .AsVector();
+ return stop_gradients[self.GetResultIndex()]
+ .dyn_cast<ir::BoolAttribute>()
+ .data();
+ } else {
+ return false;
+ }
+ }
+ void SetStopGradient(const OpResult &self, bool stop_gradient) {
auto *defining_op = self.owner();
std::vector<ir::Attribute> stop_gradients;
if (defining_op->HasAttribute(kAttrStopGradients)) {
@@ -217,34 +233,65 @@ void BindOpResult(py::module *m) {
defining_op->num_results(),
ir::BoolAttribute::get(ir::IrContext::Instance(), false));
}
- stop_gradients[self.GetResultIndex()] = ir::BoolAttribute::get(
- ir::IrContext::Instance(), stop_gradient);
+ stop_gradients[self.GetResultIndex()] =
+ ir::BoolAttribute::get(ir::IrContext::Instance(), stop_gradient);
defining_op->set_attribute(
kAttrStopGradients,
- ir::ArrayAttribute::get(ir::IrContext::Instance(),
- stop_gradients));
- })
- .def("get_stop_gradient",
- [](OpResult &self) {
- auto *defining_op = self.owner();
- if (defining_op->HasAttribute(kAttrStopGradients)) {
- auto stop_gradients = defining_op->attribute(kAttrStopGradients)
- .dyn_cast<ir::ArrayAttribute>()
- .AsVector();
- return stop_gradients[self.GetResultIndex()]
- .dyn_cast<ir::BoolAttribute>()
- .data();
- } else {
- return false;
- }
- })
- .def("__eq__", &OpResult::operator==)
+ ir::ArrayAttribute::get(ir::IrContext::Instance(), stop_gradients));
+ }
+ void BindOpResult(py::module *m) {
+ py::class_<OpResult> op_result(*m, "OpResult");
+ g_ir_opresult_pytype = reinterpret_cast<PyTypeObject *>(op_result.ptr());
+ op_result.def("__eq__", &OpResult::operator==)
.def("__eq__",
[](OpResult &self, Value &other) {
return self.value_impl() == other.impl();
})
- .def("__hash__", [](OpResult &self) {
+ .def("__hash__",
+ [](OpResult &self) {
return std::hash<ir::Value>{}(self.dyn_cast<ir::Value>());
+ })
+ .def("get_defining_op",
+ &OpResult::GetDefiningOp,
+ return_value_policy::reference)
+ .def("use_empty", &OpResult::use_empty)
+ .def("type", &OpResult::type)
+ .def_property(
+ "stop_gradient",
+ [](OpResult &self) { return GetStopGradient(self); },
+ [](OpResult &self, bool stop_gradient) {
+ SetStopGradient(self, stop_gradient);
+ })
+ .def_property(
+ "shape",
+ [](OpResult &self) {
+ if (self.type().isa<DenseTensorType>()) {
+ return phi::vectorize(
+ self.type().dyn_cast<DenseTensorType>().dims());
+ } else {
+ PADDLE_THROW(phi::errors::InvalidArgument(
+ "Currently, we can only get shape for dense tensor."));
+ }
+ },
+ [](OpResult &self, const std::vector<int> &shape) {
+ PADDLE_THROW(phi::errors::InvalidArgument(
+ "can't set shape when building static graph"));
+ })
+ .def_property(
+ "dtype",
+ [](OpResult &self) {
+ if (self.type().isa<DenseTensorType>()) {
+ return paddle::dialect::TransToPhiDataType(
+ self.type().dyn_cast<DenseTensorType>().dtype());
+ } else {
+ PADDLE_THROW(phi::errors::InvalidArgument(
+ "Currently, we can only get dtype for dense tensor."));
+ }
+ },
+ [](OpResult &self, phi::DataType dtype) {
+ PADDLE_THROW(phi::errors::InvalidArgument(
+ "can't set dtype when building static graph"));
});
}
@@ -259,25 +306,6 @@ void BindType(py::module *m) {
}
void BindUtils(pybind11::module *m) {
m->def("get_op_result_shape", [](const OpResult &op_result) {
if (op_result.type().isa<DenseTensorType>()) {
return phi::vectorize(
op_result.type().dyn_cast<DenseTensorType>().dims());
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"get_op_result_shape currently only support op_result that is a "
"DenseTensorType"));
}
});
m->def("get_op_result_dtype", [](const OpResult &op_result) {
if (op_result.type().isa<DenseTensorType>()) {
return op_result.type().dyn_cast<DenseTensorType>().dtype();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"get_op_result_dtype currently only support op_result that is a "
"DenseTensorType"));
}
});
m->def("set_global_program", m->def("set_global_program",
[](Program *program) { APIBuilder::Instance().SetProgram(program); }); [](Program *program) { APIBuilder::Instance().SetProgram(program); });
m->def("set_insertion_point", m->def("set_insertion_point",
......
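Net effect on the Python side: the free functions `get_op_result_shape` / `get_op_result_dtype` (removed just above, and dropped from `paddle/ir/__init__.py` later in this diff) are replaced by properties on `OpResult`, and `set_stop_gradient` / `get_stop_gradient` collapse into a `stop_gradient` property. A small before/after sketch, assuming an `OpResult` named `result` taken from a new-IR program, as in the updated `test_pybind` case:

```python
# Before this commit:
#   ir.get_op_result_shape(result); ir.get_op_result_dtype(result)
#   result.set_stop_gradient(True); result.get_stop_gradient()
# After this commit:
assert result.shape == [4, 4]
assert result.dtype == paddle.fluid.core.DataType.FLOAT32
result.stop_gradient = True
assert result.stop_gradient is True
# shape and dtype are read-only while building a static graph;
# assigning to them raises an InvalidArgument error.
```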
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pybind11/pybind11.h>
#include "paddle/fluid/pybind/static_op_function.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace pybind {
static PyObject *mean(PyObject *self, PyObject *args, PyObject *kwargs) {
return static_api_mean(self, args, kwargs);
}
static PyObject *sum(PyObject *self, PyObject *args, PyObject *kwargs) {
return static_api_sum(self, args, kwargs);
}
static PyObject *full(PyObject *self, PyObject *args, PyObject *kwargs) {
return static_api_full(self, args, kwargs);
}
static PyObject *divide(PyObject *self, PyObject *args, PyObject *kwargs) {
return static_api_divide(self, args, kwargs);
}
static PyMethodDef OpsAPI[] = {{"mean",
(PyCFunction)(void (*)(void))mean,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for mean."},
{"sum",
(PyCFunction)(void (*)(void))sum,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for sum."},
{"divide",
(PyCFunction)(void (*)(void))divide,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for divide."},
{"full",
(PyCFunction)(void (*)(void))full,
METH_VARARGS | METH_KEYWORDS,
"C++ interface function for full."},
{nullptr, nullptr, 0, nullptr}};
void BindOpsAPI(pybind11::module *module) {
if (PyModule_AddFunctions(module->ptr(), OpsAPI) < 0) {
PADDLE_THROW(phi::errors::Fatal("Add C++ api to core.ops failed!"));
}
}
} // namespace pybind
} // namespace paddle
@@ -222,6 +222,7 @@ namespace pybind {
PyTypeObject *g_framework_scope_pytype = nullptr;
PyTypeObject *g_framework_lodtensorarray_pytype = nullptr;
PyTypeObject *g_custom_op_kernel_ctx_pytype = nullptr;
PyTypeObject *g_data_type_pytype = nullptr;
bool IsCompiledWithAVX() {
#ifndef PADDLE_WITH_AVX
@@ -2753,6 +2754,26 @@ All parameter, weight, gradient are variables in Paddle.
BindPlace(m);
BindTensor(m);
py::enum_<phi::DataType> data_type(m, "DataType");
g_data_type_pytype = (PyTypeObject *)data_type.ptr(); // NOLINT
data_type.value("UNDEFINED", phi::DataType::UNDEFINED)
.value("BOOL", phi::DataType::BOOL)
.value("UINT8", phi::DataType::UINT8)
.value("INT8", phi::DataType::INT8)
.value("UINT16", phi::DataType::UINT16)
.value("INT16", phi::DataType::INT16)
.value("UINT32", phi::DataType::UINT32)
.value("INT32", phi::DataType::INT32)
.value("UINT64", phi::DataType::UINT64)
.value("INT64", phi::DataType::INT64)
.value("FLOAT32", phi::DataType::FLOAT32)
.value("FLOAT64", phi::DataType::FLOAT64)
.value("COMPLEX64", phi::DataType::COMPLEX64)
.value("COMPLEX128", phi::DataType::COMPLEX128)
.value("FLOAT16", phi::DataType::FLOAT16)
.value("BFLOAT16", phi::DataType::BFLOAT16)
.export_values();
#if defined(PADDLE_WITH_PSLIB) && !defined(PADDLE_WITH_HETERPS)
BindHeterWrapper(&m);
BindMetrics(&m);
...
@@ -11,7 +11,8 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <Python.h>
#include "paddle/fluid/pybind/static_op_function.h"
#include "paddle/fluid/ir/dialect/pd_api.h" #include "paddle/fluid/ir/dialect/pd_api.h"
#include "paddle/fluid/pybind/eager_utils.h" #include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/exception.h"
@@ -46,19 +47,74 @@ PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs) {
}
}
- static PyObject *mean(PyObject *self, PyObject *args, PyObject *kwargs) {
- return static_api_mean(self, args, kwargs);
- }
- static PyMethodDef OpsAPI[] = {{"mean",
- (PyCFunction)(void (*)(void))mean,
- METH_VARARGS | METH_KEYWORDS,
- "C++ interface function for mean."},
- {nullptr, nullptr, 0, nullptr}};
- void BindOpsAPI(pybind11::module *module) {
- if (PyModule_AddFunctions(module->ptr(), OpsAPI) < 0) {
- PADDLE_THROW(phi::errors::Fatal("Add C++ api to core.ops failed!"));
- }
- }
+ PyObject *static_api_sum(PyObject *self, PyObject *args, PyObject *kwargs) {
+ try {
+ VLOG(6) << "Add sum op into program";
+ VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);
+ // Get OpResult from args
+ PyObject *x_obj = PyTuple_GET_ITEM(args, 0);
+ auto x = CastPyArg2OpResult("sum", x_obj, 0);
+ // Parse Attributes if needed
+ PyObject *axis_obj = PyTuple_GET_ITEM(args, 1);
+ paddle::experimental::IntArray axis =
+ CastPyArg2IntArray(axis_obj, "sum", 1);
+ PyObject *dtype_obj = PyTuple_GET_ITEM(args, 2);
+ phi::DataType dtype = CastPyArg2DataType(dtype_obj, "sum", 2);
+ PyObject *keepdim_obj = PyTuple_GET_ITEM(args, 3);
+ bool keepdim = CastPyArg2Boolean(keepdim_obj, "sum", 3);
+ // Call ir static api
+ auto out = paddle::dialect::sum(x, axis.GetData(), dtype, keepdim);
+ return ToPyObject(out);
+ } catch (...) {
+ ThrowExceptionToPython(std::current_exception());
+ return nullptr;
+ }
+ }
+ PyObject *static_api_divide(PyObject *self, PyObject *args, PyObject *kwargs) {
+ try {
+ VLOG(6) << "Add divide op into program";
+ VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);
+ // Get OpResult from args
+ PyObject *x_obj = PyTuple_GET_ITEM(args, 0);
+ auto x = CastPyArg2OpResult("divide", x_obj, 0);
+ PyObject *y_obj = PyTuple_GET_ITEM(args, 1);
+ auto y = CastPyArg2OpResult("divide", y_obj, 1);
+ // Call ir static api
+ auto out = paddle::dialect::divide(x, y);
+ return ToPyObject(out);
+ } catch (...) {
+ ThrowExceptionToPython(std::current_exception());
+ return nullptr;
+ }
+ }
+ PyObject *static_api_full(PyObject *self, PyObject *args, PyObject *kwargs) {
+ try {
+ VLOG(6) << "Add full op into program";
+ VLOG(8) << "args count: " << (PyTuple_Size(args) / 2);
+ // Parse Attributes if needed
+ PyObject *shape_obj = PyTuple_GET_ITEM(args, 0);
+ paddle::experimental::IntArray shape =
+ CastPyArg2IntArray(shape_obj, "full", 0);
+ PyObject *value_obj = PyTuple_GET_ITEM(args, 1);
+ paddle::experimental::Scalar value = CastPyArg2Scalar(value_obj, "full", 1);
+ PyObject *dtype_obj = PyTuple_GET_ITEM(args, 2);
+ phi::DataType dtype = CastPyArg2DataTypeDirectly(dtype_obj, "full", 2);
+ PyObject *place_obj = PyTuple_GET_ITEM(args, 3);
+ paddle::Place place = CastPyArg2Place(place_obj, "full", 3);
+ // Call ir static api
+ auto out =
+ paddle::dialect::full(shape.GetData(), value.to<float>(), dtype, place);
+ return ToPyObject(out);
+ } catch (...) {
+ ThrowExceptionToPython(std::current_exception());
+ return nullptr;
+ }
+ }
...
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <Python.h>
// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif
namespace paddle {
namespace pybind {
PyObject *static_api_mean(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_sum(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_divide(PyObject *self, PyObject *args, PyObject *kwargs);
PyObject *static_api_full(PyObject *self, PyObject *args, PyObject *kwargs);
} // namespace pybind
} // namespace paddle
@@ -34,6 +34,7 @@ from .proto import framework_pb2, data_feed_pb2
from . import core
from . import unique_name
from .. import ir
import paddle.version as fluid_version
import warnings
import functools
@@ -1005,7 +1006,7 @@ def convert_np_dtype_to_dtype_(np_dtype):
string.
Returns:
- core.VarDesc.VarType: The data type in Paddle.
+ core.VarDesc.VarType / core.DataType : The data type in Paddle.
"""
# Convert the data type string to numpy data type.
@@ -1014,6 +1015,36 @@ def convert_np_dtype_to_dtype_(np_dtype):
else:
dtype = np.dtype(np_dtype)
if ir.core._use_new_ir_api():
if dtype == np.float32:
return core.DataType.FLOAT32
elif dtype == np.float64:
return core.DataType.FLOAT64
elif dtype == np.float16:
return core.DataType.FLOAT16
elif dtype == np.int32:
return core.DataType.INT32
elif dtype == np.int16:
return core.DataType.INT16
elif dtype == np.int64:
return core.DataType.INT64
elif dtype == np.bool_:
return core.DataType.BOOL
elif dtype == np.uint16:
# since there is still no support for bfloat16 in NumPy,
# uint16 is used for casting bfloat16
return core.DataType.UINT16
elif dtype == np.uint8:
return core.DataType.UINT8
elif dtype == np.int8:
return core.DataType.INT8
elif dtype == np.complex64:
return core.DataType.COMPLEX64
elif dtype == np.complex128:
return core.DataType.COMPLEX128
else:
raise ValueError("Not supported numpy dtype %s" % dtype)
else:
if dtype == np.float32:
return core.VarDesc.VarType.FP32
elif dtype == np.float64:
...
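With the new-IR flag enabled, `convert_np_dtype_to_dtype_` now hands back members of the freshly exported `core.DataType` enum rather than `core.VarDesc.VarType`. A quick sketch (assuming a build with `FLAGS_enable_new_ir_api` on):

```python
import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import convert_np_dtype_to_dtype_

assert convert_np_dtype_to_dtype_(np.float32) == core.DataType.FLOAT32
assert convert_np_dtype_to_dtype_(np.int64) == core.DataType.INT64
# uint16 still stands in for bfloat16, mirroring the old VarType mapping.
assert convert_np_dtype_to_dtype_(np.uint16) == core.DataType.UINT16
```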
@@ -22,8 +22,6 @@ from paddle.fluid.libpaddle.ir import (
Type,
) # noqa: F401
from paddle.fluid.libpaddle.ir import (
get_op_result_shape,
get_op_result_dtype,
translate_to_new_ir,
set_global_program,
set_insertion_point,
@@ -41,7 +39,5 @@ __all__ = [ # noqa
'OpOperand',
'OpResult',
'Type',
'get_op_result_shape',
'get_op_result_dtype',
'translate_to_new_ir',
]
@@ -84,7 +84,7 @@ import paddle
if paddle.ir.core._use_new_ir_api():
from ..ir import Program # noqa: F401
- from ..ir import program_guard # noqa: F401
+ from ..ir.core import program_guard # noqa: F401
else:
from ..fluid.framework import program_guard # noqa: F401
from ..fluid.framework import Program # noqa: F401
...
@@ -888,6 +888,21 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
out.stop_gradient = True
return out
else:
if paddle.ir.core._use_new_ir_api():
# Below code will be removed after we can generate IR api automatically
place = _current_expected_place()
if force_cpu:
place = core.CPUPlace()
if isinstance(shape, (list, tuple)):
shape = paddle.utils.convert_shape_to_list(shape)
if not isinstance(dtype, core.DataType):
dtype = convert_np_dtype_to_dtype_(dtype)
if out is None:
out = paddle._ir_ops.full(shape, float(value), dtype, place)
out.stop_gradient = True
return out
attrs = {'force_cpu': force_cpu}
dtype = convert_dtype(dtype)
if not isinstance(value, Variable):
...
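Under the flag, `fill_constant` therefore builds a `pd.full` op through `paddle._ir_ops.full` instead of appending a legacy `fill_constant` OpDesc. Roughly, mirroring the updated `test_insertion_point` case (where `newir_program` is an existing new-IR `Program`):

```python
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True})
with paddle.ir.core.program_guard(newir_program):
    full_out = paddle.tensor.fill_constant(shape=[4, 4], dtype="float32", value=2)
    assert full_out.get_defining_op().name() == "pd.full"
    assert full_out.stop_gradient is True  # fill_constant marks its output
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})
```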
@@ -877,6 +877,8 @@ def divide(x, y, name=None):
if in_dynamic_mode():
return _C_ops.divide(x, y)
else:
if paddle.ir.core._use_new_ir_api():
return paddle._ir_ops.divide(x, y)
return _elementwise_op(LayerHelper('elementwise_div', **locals()))
@@ -1474,6 +1476,8 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
if in_dynamic_mode():
return _C_ops.sum(x, axis, dtype, keepdim)
else:
if paddle.ir.core._use_new_ir_api():
return paddle._ir_ops.sum(x, axis, dtype, keepdim)
reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
...
@@ -53,6 +53,7 @@ class TestBuildOp(unittest.TestCase):
.name(),
"pd.tanh",
)
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})
def test_insertion_point(self):
newir_program = get_ir_program()
@@ -64,13 +65,19 @@ class TestBuildOp(unittest.TestCase):
with paddle.ir.core.program_guard(newir_program):
ir.set_insertion_point(tanh_op)
- out = paddle.mean(add_out)
+ full_out = paddle.tensor.fill_constant(
shape=[4, 4], dtype="float", value=2
)
divide_out = paddle.divide(full_out, full_out)
sum_out = paddle.sum(divide_out)
out = paddle.mean(sum_out)
tanh_operand.set_source(out)
print(newir_program)
self.assertEqual(
tanh_operand.source().get_defining_op().name(), "pd.mean"
)
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False})
if __name__ == "__main__":
...
@@ -75,14 +75,19 @@ class TestPybind(unittest.TestCase):
matmul_op = newir_program.block().get_ops()[1]
add_op = newir_program.block().get_ops()[2]
tanh_op = newir_program.block().get_ops()[3]
self.assertEqual(
matmul_op.result(0).dtype, paddle.fluid.core.DataType.FLOAT32
)
self.assertEqual(matmul_op.result(0).shape, [4, 4])
self.assertEqual(
matmul_op.results()[0].get_defining_op().name(), "pd.matmul"
)
self.assertEqual(
matmul_op.result(0).get_defining_op().name(), "pd.matmul"
)
- matmul_op.result(0).set_stop_gradient(True)
- self.assertEqual(matmul_op.result(0).get_stop_gradient(), True)
+ matmul_op.result(0).stop_gradient = True
+ self.assertEqual(matmul_op.result(0).stop_gradient, True)
# test opresult hash
result_set = set()
@@ -125,11 +130,39 @@
matmul_op.result(0).type() == add_op.result(0).type(), True
)
def test_utils(self): def test_attr(self):
newir_program = get_ir_program() main_program, start_program = (
matmul_op = newir_program.block().get_ops()[1] paddle.static.Program(),
print(ir.get_op_result_dtype(matmul_op.result(0))) paddle.static.Program(),
self.assertEqual(ir.get_op_result_shape(matmul_op.result(0)), [4, 4]) )
with paddle.static.program_guard(main_program, start_program):
conv_data = paddle.static.data(
'conv_data', [None, 3, 32, 32], dtype='float32'
)
conv2d_out = paddle.static.nn.conv2d(
input=conv_data,
num_filters=2,
filter_size=3,
stride=3,
act="relu",
)
full_out = paddle.tensor.fill_constant(
shape=[4, 4], dtype="float32", value=2
)
newir_program = ir.translate_to_new_ir(main_program.desc)
print(newir_program)
conv_attr = newir_program.block().get_ops()[3].attrs()
full_attr = newir_program.block().get_ops()[8].attrs()
self.assertEqual(conv_attr["stop_gradient"], [False])
self.assertEqual(conv_attr["dilations"], [1, 1])
self.assertEqual(conv_attr["data_format"], "NCHW")
self.assertEqual(conv_attr["strides"], [3, 3])
self.assertEqual(conv_attr["paddings"], [0, 0])
self.assertEqual(conv_attr["padding_algorithm"], "EXPLICIT")
self.assertEqual(conv_attr["groups"], 1)
self.assertEqual(full_attr["dtype"], paddle.fluid.core.DataType.FLOAT32)
self.assertTrue(isinstance(full_attr["place"], paddle.fluid.core.Place))
if __name__ == "__main__":
...