diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index e281c8c48f5041f9ca65ca532e68549a05980750..8248dcd36398bc12053c4a392168fd3015d92c15 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -8,5 +8,8 @@ add_subdirectory(pybind)
 add_subdirectory(eager)
 add_subdirectory(prim)
 add_subdirectory(jit)
+if(WITH_NEWIR)
+  add_subdirectory(dialect)
+endif()
 # NOTE: please add subdirectory inference at last.
 add_subdirectory(inference)
diff --git a/paddle/fluid/dialect/CMakeLists.txt b/paddle/fluid/dialect/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..644b9913794250f0025f09a8ac7c660825eafd9f
--- /dev/null
+++ b/paddle/fluid/dialect/CMakeLists.txt
@@ -0,0 +1,9 @@
+set(PD_DIALECT_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/fluid/dialect")
+set(PD_DIALECT_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/fluid/dialect")
+
+file(GLOB PD_DIALECT_SRCS "*.cc")
+
+cc_library(
+  pd_dialect
+  SRCS ${PD_DIALECT_SRCS}
+  DEPS new_ir framework_proto dense_tensor)
diff --git a/paddle/fluid/dialect/pd_dialect.cc b/paddle/fluid/dialect/pd_dialect.cc
new file mode 100644
index 0000000000000000000000000000000000000000..df98a9978803a07e23a9e970d5464f9c61dd2d99
--- /dev/null
+++ b/paddle/fluid/dialect/pd_dialect.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "paddle/fluid/dialect/pd_dialect.h" +#include "paddle/fluid/dialect/pd_type.h" +#include "paddle/fluid/dialect/utils.h" +#include "paddle/fluid/framework/convert_utils.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/ir/builtin_type.h" +#include "paddle/ir/dialect_interface.h" +#include "paddle/phi/core/dense_tensor.h" + +namespace paddle { +namespace dialect { +std::shared_ptr +ParameterConvertInterface::ParameterToVariable(ir::Parameter* parameter) { + if (parameter->type().isa()) { + VLOG(4) << "Convert a DenseTensor Parameter to a variable."; + std::shared_ptr var = + std::make_shared(); + phi::DenseTensor* tensor = var->GetMutable(); + // Init DenseTensor + auto dim = parameter->type().dyn_cast().dim(); + phi::DenseTensorMeta meta( + TransToPhiDataType( + parameter->type().dyn_cast().dtype()), + phi::DDim(dim.data(), dim.size()), + TransToPhiDataLayout( + parameter->type().dyn_cast().data_layout()), + parameter->type().dyn_cast().lod(), + parameter->type().dyn_cast().offset()); + tensor->set_meta(meta); + paddle::platform::DeviceContext* dev_ctx = + paddle::platform::DeviceContextPool::Instance().Get( + paddle::platform::CPUPlace()); + dev_ctx->Alloc(tensor, + TransToPhiDataType( + parameter->type().dyn_cast().dtype())); + memcpy(tensor->data(), + parameter->data(), + tensor->numel() * phi::SizeOf(tensor->dtype())); + return var; + } else { + return nullptr; + } +} + +std::unique_ptr ParameterConvertInterface::VariableToParameter( + paddle::framework::Variable* var) { + if (var->IsType()) { + phi::DenseTensor* tensor = var->GetMutable(); + // Get Meta + ir::IrContext* ctx = ir::IrContext::Instance(); + ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx); + DenseTensorTypeStorage::Dim dims(tensor->dims().size()); + std::copy(tensor->dims().Get(), + tensor->dims().Get() + tensor->dims().size(), + dims.data()); + DenseTensorTypeStorage::DataLayout data_layout = + TransToIrDataLayout(tensor->layout()); + DenseTensorTypeStorage::LoD lod = tensor->lod(); + size_t offset = tensor->meta().offset; + void* data = tensor->data(); + ir::Type dense_tensor_type = + DenseTensorType::get(ctx, data_type, dims, data_layout, lod, offset); + return std::make_unique( + data, + tensor->numel() * phi::SizeOf(tensor->dtype()), + dense_tensor_type); + } else { + return nullptr; + } +} + +PaddleDialect::PaddleDialect(ir::IrContext* context) + : ir::Dialect(name(), context, ir::TypeId::get()) { + initialize(); +} + +void PaddleDialect::initialize() { + RegisterTypes(); + RegisterInterfaces(); +} + +} // namespace dialect +} // namespace paddle diff --git a/paddle/fluid/dialect/pd_dialect.h b/paddle/fluid/dialect/pd_dialect.h new file mode 100644 index 0000000000000000000000000000000000000000..a81ff7cd48ceb24b6d30505ecaf3211d31ae8d85 --- /dev/null +++ b/paddle/fluid/dialect/pd_dialect.h @@ -0,0 +1,47 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/paddle/fluid/dialect/pd_dialect.h b/paddle/fluid/dialect/pd_dialect.h
new file mode 100644
index 0000000000000000000000000000000000000000..a81ff7cd48ceb24b6d30505ecaf3211d31ae8d85
--- /dev/null
+++ b/paddle/fluid/dialect/pd_dialect.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/framework/variable.h"
+#include "paddle/ir/dialect.h"
+#include "paddle/ir/parameter.h"
+
+namespace paddle {
+namespace dialect {
+class ParameterConvertInterface
+    : public ir::DialectInterface::Base<ParameterConvertInterface> {
+ public:
+  explicit ParameterConvertInterface(ir::Dialect* dialect) : Base(dialect) {}
+
+  // NOTE(zhangbo): Only supports creating a new CPU Variable.
+  std::shared_ptr<paddle::framework::Variable> ParameterToVariable(
+      ir::Parameter* parameter);
+
+  std::unique_ptr<ir::Parameter> VariableToParameter(
+      paddle::framework::Variable* var);
+};
+
+class PaddleDialect : public ir::Dialect {
+ public:
+  explicit PaddleDialect(ir::IrContext* context);
+
+  static const char* name() { return "pd"; }
+
+ private:
+  void initialize();
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/ir/builtin_type.cc b/paddle/fluid/dialect/pd_type.cc
similarity index 69%
rename from paddle/ir/builtin_type.cc
rename to paddle/fluid/dialect/pd_type.cc
index 5e18b945016c7156bf1914ddd339e00958aac273..017af3ffd5aea80c2d6c21f29487dbe5d06ee8af 100644
--- a/paddle/ir/builtin_type.cc
+++ b/paddle/fluid/dialect/pd_type.cc
@@ -12,23 +12,28 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/ir/builtin_type.h"
+#include "paddle/fluid/dialect/pd_type.h"
 
-namespace ir {
+namespace paddle {
+namespace dialect {
 const ir::Type& DenseTensorType::dtype() const { return storage()->dtype_; }
 
-const ir::DenseTensorTypeStorage::Dim& DenseTensorType::dim() const {
+const paddle::dialect::DenseTensorTypeStorage::Dim& DenseTensorType::dim()
+    const {
   return storage()->dims_;
 }
 
-const ir::DenseTensorTypeStorage::DataLayout& DenseTensorType::data_layout()
-    const {
+const paddle::dialect::DenseTensorTypeStorage::DataLayout&
+DenseTensorType::data_layout() const {
   return storage()->layout_;
 }
 
-const ir::DenseTensorTypeStorage::LoD& DenseTensorType::lod() const {
+const paddle::dialect::DenseTensorTypeStorage::LoD& DenseTensorType::lod()
+    const {
   return storage()->lod_;
 }
 
 const size_t& DenseTensorType::offset() const { return storage()->offset_; }
-}  // namespace ir
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/dialect/pd_type.h b/paddle/fluid/dialect/pd_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..2040828a146ac1744c71a1dde0b03a3d027c7eda
--- /dev/null
+++ b/paddle/fluid/dialect/pd_type.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/dialect/pd_type_storage.h"
+#include "paddle/ir/type.h"
+
+namespace paddle {
+namespace dialect {
+#define GET_PADDLE_TYPE_LIST paddle::dialect::DenseTensorType
+
+///
+/// \brief Define built-in parametric types.
+///
+class DenseTensorType : public ir::Type {
+ public:
+  using Type::Type;
+
+  DECLARE_TYPE_UTILITY_FUNCTOR(DenseTensorType, DenseTensorTypeStorage);
+
+  const ir::Type &dtype() const;
+
+  const paddle::dialect::DenseTensorTypeStorage::Dim &dim() const;
+
+  const paddle::dialect::DenseTensorTypeStorage::DataLayout &data_layout()
+      const;
+
+  const paddle::dialect::DenseTensorTypeStorage::LoD &lod() const;
+
+  const size_t &offset() const;
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/ir/builtin_type_storage.h b/paddle/fluid/dialect/pd_type_storage.h
similarity index 98%
rename from paddle/ir/builtin_type_storage.h
rename to paddle/fluid/dialect/pd_type_storage.h
index 132a1656a7975dc2197ceaece4d0f9a386db4ffc..1ea44eccc7347aed918feca808fe0a3e27d55dda 100644
--- a/paddle/ir/builtin_type_storage.h
+++ b/paddle/fluid/dialect/pd_type_storage.h
@@ -36,7 +36,8 @@ struct hash<std::vector<T>> {
 };
 
 }  // namespace std
 
-namespace ir {
+namespace paddle {
+namespace dialect {
 ///
 /// \brief Define Parameteric TypeStorage for DenseTensorType.
 ///
@@ -151,4 +152,5 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
   size_t offset_;
 };
 
-}  // namespace ir
+}  // namespace dialect
+}  // namespace paddle
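
For reference, a sketch of constructing this parametric type, using the same values as the program test at the end of the patch (the wrapper function name is hypothetical):

    #include "paddle/fluid/dialect/pd_type.h"
    #include "paddle/ir/builtin_type.h"
    #include "paddle/ir/ir_context.h"

    ir::Type MakeFp32TensorType() {
      ir::IrContext* ctx = ir::IrContext::Instance();
      paddle::dialect::DenseTensorTypeStorage::Dim dims = {2, 2};
      auto layout = paddle::dialect::DenseTensorTypeStorage::DataLayout::NCHW;
      paddle::dialect::DenseTensorTypeStorage::LoD lod = {{0, 1, 2}};
      // get() uniques the type in the context: a second call with identical
      // parameters returns the same Type instance.
      return paddle::dialect::DenseTensorType::get(
          ctx, ir::Float32Type::get(ctx), dims, layout, lod, /*offset=*/0);
    }
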
diff --git a/paddle/fluid/dialect/utils.h b/paddle/fluid/dialect/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..69af6fd1ddce80b8aac1f6bb4b1f7fa2764603eb
--- /dev/null
+++ b/paddle/fluid/dialect/utils.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/dialect/pd_type_storage.h"
+#include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/framework/data_type.h"
+#include "paddle/ir/builtin_type.h"
+#include "paddle/phi/core/dense_tensor.h"
+
+namespace paddle {
+namespace dialect {
+// TODO(zhangbo): The builtin type needs to cover all data types of
+// phi::DataType.
+inline phi::DataType TransToPhiDataType(ir::Type dtype) {
+  if (dtype.isa<ir::Float16Type>()) {
+    return phi::DataType::FLOAT16;
+  } else if (dtype.isa<ir::Float32Type>()) {
+    return phi::DataType::FLOAT32;
+  } else if (dtype.isa<ir::Float64Type>()) {
+    return phi::DataType::FLOAT64;
+  } else if (dtype.isa<ir::Int16Type>()) {
+    return phi::DataType::INT16;
+  } else if (dtype.isa<ir::Int32Type>()) {
+    return phi::DataType::INT32;
+  } else if (dtype.isa<ir::Int64Type>()) {
+    return phi::DataType::INT64;
+  } else {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "Unsupported ir data type when casting it into "
+        "phi data type."));
+  }
+}
+
+inline ir::Type TransToIrDataType(phi::DataType dtype,
+                                  ir::IrContext *ctx = nullptr) {
+  if (ctx == nullptr) {
+    ctx = ir::IrContext::Instance();
+  }
+  switch (dtype) {
+    case phi::DataType::FLOAT16:
+      return ir::Float16Type::get(ctx);
+    case phi::DataType::FLOAT32:
+      return ir::Float32Type::get(ctx);
+    case phi::DataType::FLOAT64:
+      return ir::Float64Type::get(ctx);
+    case phi::DataType::INT16:
+      return ir::Int16Type::get(ctx);
+    case phi::DataType::INT32:
+      return ir::Int32Type::get(ctx);
+    case phi::DataType::INT64:
+      return ir::Int64Type::get(ctx);
+    default:
+      PADDLE_THROW(phi::errors::Unimplemented(
+          "Unsupported phi data type `%s` when casting it into "
+          "ir data type.",
+          dtype));
+  }
+}
+
+inline phi::DataLayout TransToPhiDataLayout(
+    DenseTensorTypeStorage::DataLayout data_layout) {
+  switch (data_layout) {
+    case DenseTensorTypeStorage::DataLayout::NHWC:
+      return phi::DataLayout::NHWC;
+    case DenseTensorTypeStorage::DataLayout::NCHW:
+      return phi::DataLayout::NCHW;
+    case DenseTensorTypeStorage::DataLayout::NCDHW:
+      return phi::DataLayout::NCDHW;
+    case DenseTensorTypeStorage::DataLayout::NDHWC:
+      return phi::DataLayout::NDHWC;
+    case DenseTensorTypeStorage::DataLayout::ONEDNN:
+      return phi::DataLayout::ONEDNN;
+    case DenseTensorTypeStorage::DataLayout::SPARSE_COO:
+      return phi::DataLayout::SPARSE_COO;
+    case DenseTensorTypeStorage::DataLayout::SPARSE_CSR:
+      return phi::DataLayout::SPARSE_CSR;
+    case DenseTensorTypeStorage::DataLayout::PSTRING_UNION:
+      return phi::DataLayout::PSTRING_UNION;
+    case DenseTensorTypeStorage::DataLayout::NUM_DATA_LAYOUTS:
+      return phi::DataLayout::NUM_DATA_LAYOUTS;
+    case DenseTensorTypeStorage::DataLayout::ALL_LAYOUT:
+      return phi::DataLayout::ALL_LAYOUT;
+    default:
+      PADDLE_THROW(phi::errors::Unimplemented(
+          "Unsupported ir data layout `%d` when casting it into "
+          "phi data layout.",
+          static_cast<int>(data_layout)));
+  }
+}
+
+inline DenseTensorTypeStorage::DataLayout TransToIrDataLayout(
+    phi::DataLayout data_layout) {
+  switch (data_layout) {
+    case phi::DataLayout::NHWC:
+      return DenseTensorTypeStorage::DataLayout::NHWC;
+    case phi::DataLayout::NCHW:
+      return DenseTensorTypeStorage::DataLayout::NCHW;
+    case phi::DataLayout::NCDHW:
+      return DenseTensorTypeStorage::DataLayout::NCDHW;
+    case phi::DataLayout::NDHWC:
+      return DenseTensorTypeStorage::DataLayout::NDHWC;
+    case phi::DataLayout::ONEDNN:
+      return DenseTensorTypeStorage::DataLayout::ONEDNN;
+    case phi::DataLayout::SPARSE_COO:
+      return DenseTensorTypeStorage::DataLayout::SPARSE_COO;
+    case phi::DataLayout::SPARSE_CSR:
+      return DenseTensorTypeStorage::DataLayout::SPARSE_CSR;
+    case phi::DataLayout::PSTRING_UNION:
+      return DenseTensorTypeStorage::DataLayout::PSTRING_UNION;
+    case phi::DataLayout::NUM_DATA_LAYOUTS:
+      return DenseTensorTypeStorage::DataLayout::NUM_DATA_LAYOUTS;
+    case phi::DataLayout::ALL_LAYOUT:
+      return DenseTensorTypeStorage::DataLayout::ALL_LAYOUT;
+    default:
+      PADDLE_THROW(phi::errors::Unimplemented(
+          "Unsupported phi data layout `%d` when casting it into "
+          "ir data layout.",
+          static_cast<int>(data_layout)));
+  }
+}
+
+}  // namespace dialect
+}  // namespace paddle
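
The four helpers are pairwise inverses on the supported subset, so a round trip should be the identity; a hedged sanity-check sketch (not part of the patch):

    #include <cassert>
    #include "paddle/fluid/dialect/utils.h"

    void RoundTripSanityCheck() {
      ir::IrContext* ctx = ir::IrContext::Instance();
      // dtype: phi -> ir -> phi should be the identity on supported types.
      ir::Type t =
          paddle::dialect::TransToIrDataType(phi::DataType::FLOAT32, ctx);
      assert(paddle::dialect::TransToPhiDataType(t) == phi::DataType::FLOAT32);
      // layout: phi -> ir -> phi likewise.
      auto l = paddle::dialect::TransToIrDataLayout(phi::DataLayout::NCHW);
      assert(paddle::dialect::TransToPhiDataLayout(l) == phi::DataLayout::NCHW);
    }
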
diff --git a/paddle/ir/builtin_attribute.cc b/paddle/ir/builtin_attribute.cc
index 3a1b2acfb716bb9bd0283b015641dac0486172a7..40c0204ce9cfd0917ed3b7ee375890225b14c5c4 100644
--- a/paddle/ir/builtin_attribute.cc
+++ b/paddle/ir/builtin_attribute.cc
@@ -19,24 +19,4 @@ std::string StrAttribute::data() const { return storage()->GetAsKey(); }
 
 uint32_t StrAttribute::size() const { return storage()->GetAsKey().size(); }
 
-NamedAttribute::NamedAttribute(StrAttribute name, Attribute value)
-    : name_(name), value_(value) {}
-
-bool NamedAttribute::operator<(const NamedAttribute &right) const {
-  return name() < right.name();
-}
-
-bool NamedAttribute::operator==(const NamedAttribute &right) const {
-  return name() == right.name() && value() == right.value();
-}
-
-bool NamedAttribute::operator!=(const NamedAttribute &right) const {
-  return !(*this == right);
-}
-
-Attribute DictionaryAttribute::GetValue(const StrAttribute &name) {
-  return storage()->GetValue(name);
-}
-
-uint32_t DictionaryAttribute::size() const { return storage()->size(); }
 }  // namespace ir
diff --git a/paddle/ir/builtin_attribute.h b/paddle/ir/builtin_attribute.h
index 82b5f8eb48aa58d49ed8592f85d76849174817ee..4e2164a1e4d6a9f61540e31ef4f6cdb7d70a15fa 100644
--- a/paddle/ir/builtin_attribute.h
+++ b/paddle/ir/builtin_attribute.h
@@ -22,7 +22,7 @@ namespace ir {
 ///
 /// \brief All built-in attributes.
 ///
-#define GET_BUILT_IN_ATTRIBUTE_LIST ir::StrAttribute, ir::DictionaryAttribute
+#define GET_BUILT_IN_ATTRIBUTE_LIST ir::StrAttribute
 
 class StrAttribute : public ir::Attribute {
  public:
@@ -39,55 +39,13 @@ class StrAttribute : public ir::Attribute {
   uint32_t size() const;
 };
 
-class NamedAttribute {
- public:
-  NamedAttribute(StrAttribute name, Attribute value);
-
-  StrAttribute name() const { return name_; }
-
-  Attribute value() const { return value_; }
-
-  void SetName(StrAttribute name) { name_ = name; }
-
-  void SetValue(Attribute value) { value_ = value; }
-
-  bool operator<(const NamedAttribute &right) const;
-
-  bool operator==(const NamedAttribute &right) const;
-
-  bool operator!=(const NamedAttribute &right) const;
-
-  friend struct std::hash<NamedAttribute>;
-
-  operator std::pair<StrAttribute, Attribute>() const {
-    return std::make_pair(name_, value_);
-  }
-
- private:
-  StrAttribute name_;
-  Attribute value_;
-};
-
-class DictionaryAttribute : public ir::Attribute {
- public:
-  using Attribute::Attribute;
-
-  DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(DictionaryAttribute,
-                                    DictionaryAttributeStorage);
-
-  Attribute GetValue(const StrAttribute &name);
-
-  uint32_t size() const;
-};
-
 }  // namespace ir
 
 namespace std {
 template <>
-struct hash<ir::NamedAttribute> {
-  std::size_t operator()(const ir::NamedAttribute &obj) const {
-    return ir::hash_combine(std::hash<ir::StrAttribute>()(obj.name_),
-                            std::hash<ir::Attribute>()(obj.value_));
+struct hash<ir::StrAttribute> {
+  std::size_t operator()(const ir::StrAttribute &obj) const {
+    return std::hash<const ir::StrAttributeStorage *>()(obj.storage());
   }
 };
 }  // namespace std
diff --git a/paddle/ir/builtin_attribute_storage.cc b/paddle/ir/builtin_attribute_storage.cc
deleted file mode 100644
index 3f785d20c9b92218aa5b315d6ad4ec94fd1310cb..0000000000000000000000000000000000000000
--- a/paddle/ir/builtin_attribute_storage.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/ir/builtin_attribute_storage.h"
-#include "paddle/ir/builtin_attribute.h"
-#include "paddle/ir/utils.h"
-
-namespace ir {
-
-DictionaryAttributeStorage::DictionaryAttributeStorage(const ParamKey &key) {
-  size_ = key.size();
-  data_ = reinterpret_cast<NamedAttribute *>(
-      malloc(sizeof(NamedAttribute) * size_));
-  uint32_t idx = 0;
-  for (auto iter = key.begin(); iter != key.end(); ++iter) {
-    data_[idx].SetName(iter->first);
-    data_[idx].SetValue(iter->second);
-    idx++;
-  }
-}
-
-std::size_t DictionaryAttributeStorage::HashValue(const ParamKey &key) {
-  std::size_t hash_value = key.size();
-  for (auto iter = key.begin(); iter != key.end(); ++iter) {
-    hash_value = ir::hash_combine(
-        hash_value,
-        std::hash<NamedAttribute>()(NamedAttribute(iter->first, iter->second)));
-  }
-  return hash_value;
-}
-
-bool DictionaryAttributeStorage::operator==(const ParamKey &key) const {
-  uint32_t size = key.size();
-  if (size_ != size) return false;
-  uint32_t idx = 0;
-  for (auto iter = key.begin(); iter != key.end(); ++iter) {
-    if (data_[idx] != NamedAttribute(iter->first, iter->second)) {
-      return false;
-    }
-    idx++;
-  }
-  return true;
-}
-
-DictionaryAttributeStorage::ParamKey DictionaryAttributeStorage::GetAsKey()
-    const {
-  return ParamKey(data_, data_ + size_);
-}
-
-Attribute DictionaryAttributeStorage::GetValue(const StrAttribute &name) const {
-  size_t left = 0;
-  size_t right = size_;
-  while (left < right) {
-    size_t mid = left + (right - left) / 2;
-    if (data_[mid].name() == name) {
-      return data_[mid].value();
-    } else if (data_[mid].name() < name) {
-      left = mid + 1;
-    } else {
-      right = mid;
-    }
-  }
-  return nullptr;
-}
-
-}  // namespace ir
diff --git a/paddle/ir/builtin_attribute_storage.h b/paddle/ir/builtin_attribute_storage.h
index a34648fb17e3581868b8c4b51ee533b30cd1d442..f6f97d5e616ed7038e4ad32f554e9a09bb56f690 100644
--- a/paddle/ir/builtin_attribute_storage.h
+++ b/paddle/ir/builtin_attribute_storage.h
@@ -54,36 +54,4 @@ struct StrAttributeStorage : public ir::AttributeStorage {
   uint32_t size_;
 };
 
-///
-/// \brief Define Parameteric AttributeStorage for DictionaryAttributeStorage.
-///
-class StrAttribute;
-class NamedAttribute;
-struct DictionaryAttributeStorage : public AttributeStorage {
-  using ParamKey = std::map<StrAttribute, Attribute>;
-
-  explicit DictionaryAttributeStorage(const ParamKey &key);
-
-  ~DictionaryAttributeStorage() { free(data_); }
-
-  static DictionaryAttributeStorage *Construct(ParamKey key) {
-    return new DictionaryAttributeStorage(key);
-  }
-
-  static std::size_t HashValue(const ParamKey &key);
-
-  bool operator==(const ParamKey &key) const;
-
-  ParamKey GetAsKey() const;
-
-  Attribute GetValue(const StrAttribute &name) const;
-
-  NamedAttribute *data() const { return data_; }
-
-  uint32_t size() const { return size_; }
-
- private:
-  NamedAttribute *data_;
-  uint32_t size_;
-};
 }  // namespace ir
diff --git a/paddle/ir/builtin_dialect.cc b/paddle/ir/builtin_dialect.cc
index 144ee272c4c1b0b2b6a6407f507ce53dca76c9a0..5e08798828f5904990c43f382d08447774f3dafa 100644
--- a/paddle/ir/builtin_dialect.cc
+++ b/paddle/ir/builtin_dialect.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/ir/builtin_dialect.h"
 #include "paddle/ir/builtin_attribute.h"
+#include "paddle/ir/builtin_op.h"
 #include "paddle/ir/builtin_type.h"
 
 namespace ir {
@@ -26,6 +27,7 @@ void BuiltinDialect::initialize() {
   // Register all built-in types defined in builtin_type.h.
   RegisterTypes<GET_BUILT_IN_TYPE_LIST>();
   RegisterAttributes<GET_BUILT_IN_ATTRIBUTE_LIST>();
+  RegisterOps<GET_BUILT_IN_OP_LIST>();
 }
 }  // namespace ir
diff --git a/paddle/ir/builtin_op.cc b/paddle/ir/builtin_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..528631b0b7998d7c30a281f80ac8c6f535b03198
--- /dev/null
+++ b/paddle/ir/builtin_op.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/ir/builtin_op.h"
+
+namespace ir {
+const char *GetParameterOp::attributes_name_[] = {"parameter_name"};
+
+const char *SetParameterOp::attributes_name_[] = {"parameter_name"};
+
+}  // namespace ir
diff --git a/paddle/ir/builtin_op.h b/paddle/ir/builtin_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5c4296394a9abf0fe3a35a60539a898902915b7
--- /dev/null
+++ b/paddle/ir/builtin_op.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/ir/op_base.h"
+
+namespace ir {
+///
+/// \brief This macro is used to get a list of all built-in OPs in this file.
+/// The built-in Dialect will use this macro to quickly register all built-in
+/// OPs.
+///
+#define GET_BUILT_IN_OP_LIST ir::GetParameterOp, ir::SetParameterOp
+
+///
+/// \brief GetParameterOp: OpResult = GetParameterOp({StrAttribute,
+/// StrAttribute})
+///
+class GetParameterOp : public ir::Op<GetParameterOp> {
+ public:
+  using Op::Op;
+
+  static const char* name() { return "GetParameterOp"; }
+
+  static uint32_t attributes_num() { return 1; }
+
+  static const char* attributes_name_[];
+};
+
+///
+/// \brief SetParameterOp: SetParameterOp(OpOperand, {StrAttribute,
+/// StrAttribute})
+///
+class SetParameterOp : public ir::Op<SetParameterOp> {
+ public:
+  using Op::Op;
+
+  static const char* name() { return "SetParameterOp"; }
+
+  static uint32_t attributes_num() { return 1; }
+
+  static const char* attributes_name_[];
+};
+
+}  // namespace ir
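
Ops in this IR are metadata-only classes; a hypothetical extra op following the same pattern would look like this (MyOp is illustrative, mirroring the zero-attribute AddOp defined in the program test at the end of the patch):

    #include "paddle/ir/op_base.h"

    // Registration happens through a dialect, e.g. dialect->RegisterOp<MyOp>().
    class MyOp : public ir::Op<MyOp> {
     public:
      using Op::Op;
      static const char* name() { return "MyOp"; }
      static uint32_t attributes_num() { return 0; }
      static const char** attributes_name_;
    };
    const char** MyOp::attributes_name_ = nullptr;  // no attributes
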
"paddle/ir/type_base.h" namespace ir { +class DialectInterface; /// /// \brief Dialect can basically be understood as a namespace. In Dialect, we /// can define a series of types, attributes, operations, etc. An instance of @@ -31,6 +33,8 @@ class Dialect { public: Dialect(std::string name, ir::IrContext *context, ir::TypeId id); + virtual ~Dialect(); + const std::string &name() const { return name_; } ir::IrContext *ir_context() const { return context_; } @@ -49,8 +53,6 @@ class Dialect { template void RegisterType() { VLOG(4) << "Type registered into Dialect. --->"; - // if (this->ir_context()->registed_abstract_type().count( - // ir::TypeId::get()) == 0) { if (this->ir_context()->GetRegisteredAbstractType(ir::TypeId::get()) == nullptr) { ir::AbstractType *abstract_type = @@ -118,11 +120,50 @@ class Dialect { void RegisterOp(const std::string &name, OpInfoImpl *op_info); + /// + /// \brief Register interface methods. + /// + + DialectInterface *GetRegisteredInterface(TypeId id) { + auto it = registered_interfaces_.find(id); + return it != registered_interfaces_.end() ? it->second.get() : nullptr; + } + + template + InterfaceT *GetRegisteredInterface() { + return static_cast( + GetRegisteredInterface(TypeId::get())); + } + + /// Register a dialect interface with this dialect instance. + void RegisterInterface(std::unique_ptr interface); + + /// Register a set of dialect interfaces with this dialect instance. + template + void RegisterInterfaces() { + (void)std::initializer_list{ + 0, (RegisterInterface(std::make_unique(this)), 0)...}; + } + + template + InterfaceT &RegisterInterface(Args &&...args) { + InterfaceT *interface = new InterfaceT(this, std::forward(args)...); + RegisterInterface(std::unique_ptr(interface)); + return *interface; + } + private: + Dialect(const Dialect &) = delete; + + Dialect &operator=(Dialect &) = delete; + std::string name_; ir::IrContext *context_; // not owned ir::TypeId id_; + + std::unordered_map> + registered_interfaces_; }; } // namespace ir diff --git a/paddle/ir/dialect_interface.h b/paddle/ir/dialect_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..dd3ec08af52fd61029b9436be480a660463351a9 --- /dev/null +++ b/paddle/ir/dialect_interface.h @@ -0,0 +1,67 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/ir/type_id.h" + +namespace ir { +class Dialect; +class IrContext; +/// +/// \brief DialectInterface +/// +template +class DialectInterfaceBase : public BaseT { + public: + using Base = DialectInterfaceBase; + + /// Get a unique id for the derived interface type. + static TypeId id() { return TypeId::get(); } + + protected: + explicit DialectInterfaceBase(Dialect *dialect) : BaseT(dialect, id()) {} +}; + +class DialectInterface { + public: + virtual ~DialectInterface(); + + /// The base class used for all derived interface types. This class provides + /// utilities necessary for registration. 
+  template <typename ConcreteType>
+  using Base = DialectInterfaceBase<ConcreteType, DialectInterface>;
+
+  /// Return the dialect that this interface represents.
+  Dialect *dialect() const { return dialect_; }
+
+  /// Return the context that holds the parent dialect of this interface.
+  IrContext *ir_context() const;
+
+  /// Return the derived interface id.
+  TypeId interface_id() const { return interface_id_; }
+
+ protected:
+  DialectInterface(Dialect *dialect, TypeId id)
+      : dialect_(dialect), interface_id_(id) {}
+
+ private:
+  /// The dialect this interface belongs to.
+  Dialect *dialect_;
+
+  /// The unique identifier for the derived interface type.
+  TypeId interface_id_;
+};
+
+}  // namespace ir
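
A condensed sketch of the intended interface lifecycle (MyInterface is hypothetical; the registration and lookup calls are the ones declared in dialect.h above, and ParameterConvertInterface earlier in the patch is the real instance of this pattern):

    #include "paddle/ir/dialect.h"

    class MyInterface : public ir::DialectInterface::Base<MyInterface> {
     public:
      explicit MyInterface(ir::Dialect* dialect) : Base(dialect) {}
    };

    // Inside SomeDialect::initialize():
    //   RegisterInterfaces<MyInterface>();
    // Client code, given any ir::Dialect* d:
    //   MyInterface* i = d->GetRegisteredInterface<MyInterface>();
    //   // i is nullptr if the dialect never registered MyInterface.
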
diff --git a/paddle/ir/op_info_impl.h b/paddle/ir/op_info_impl.h
index 78ca586d9d209887a9c9e307226c9ab049eab968..4380866bceb810f1a518e37e70b38c9b873d3b21 100644
--- a/paddle/ir/op_info_impl.h
+++ b/paddle/ir/op_info_impl.h
@@ -236,6 +236,12 @@ class OpInfoImpl {
 
   ir::Dialect *dialect() const { return dialect_; }
 
+  uint32_t AttributeNum() const { return num_attributes_; }
+
+  const char *GetAttributeByIndex(size_t idx) const {
+    return idx < num_attributes_ ? p_attributes_[idx] : nullptr;
+  }
+
  private:
   OpInfoImpl(uint32_t num_interfaces,
              uint32_t num_traits,
diff --git a/paddle/ir/operation.cc b/paddle/ir/operation.cc
index 9222ee7e1afbb4777174c69d2a5f39eafd18f25a..3cfe2b048e472c733008fc2a679c5145d2fc2185 100644
--- a/paddle/ir/operation.cc
+++ b/paddle/ir/operation.cc
@@ -13,6 +13,8 @@
 // limitations under the License.
 
 #include "paddle/ir/operation.h"
+#include "paddle/ir/dialect.h"
+#include "paddle/ir/program.h"
 #include "paddle/ir/utils.h"
 
 namespace ir {
@@ -21,7 +23,7 @@ namespace ir {
 // OpInlineResult, Operation, Operand.
 Operation *Operation::create(const std::vector<ir::OpResult> &inputs,
                              const std::vector<ir::Type> &output_types,
-                             ir::DictionaryAttribute attribute,
+                             const AttributeMap &attribute,
                              ir::OpInfo op_info) {
   // 1. Calculate the required memory size for OpResults + Operation +
   // OpOperands.
@@ -63,7 +65,7 @@ Operation *Operation::create(const std::vector<ir::OpResult> &inputs,
     new (base_ptr) detail::OpOperandImpl(inputs[idx].impl_, op);
     base_ptr += sizeof(detail::OpOperandImpl);
   }
-  VLOG(4) << "Construct an Operation: " << op->print();
+
   return op;
 }
 
@@ -83,9 +85,18 @@ void Operation::destroy() {
 
   // 2.1. Deconstruct OpResult.
   char *base_ptr = aligned_ptr;
   for (size_t idx = num_results_; idx > 0; idx--) {
-    if (!reinterpret_cast<detail::OpResultImpl *>(base_ptr)->use_empty()) {
-      throw("Cannot destroy a value that still has uses!");
+    // release the uses of this result
+    detail::OpOperandImpl *first_use =
+        reinterpret_cast<detail::OpResultImpl *>(base_ptr)->first_use();
+    if (first_use != nullptr) {
+      first_use->release_source();
+      detail::OpOperandImpl *next_use = first_use->next_use();
+      while (next_use != nullptr) {
+        next_use->release_source();
+        next_use = next_use->next_use();
+      }
     }
+    // destroy the result
     if (idx > max_inline_result_num) {
       reinterpret_cast<detail::OpOutlineResultImpl *>(base_ptr)
           ->~OpOutlineResultImpl();
@@ -117,11 +128,8 @@ void Operation::destroy() {
 
 Operation::Operation(uint32_t num_results,
                      uint32_t num_operands,
-                     ir::DictionaryAttribute attribute,
+                     const AttributeMap &attribute,
                      ir::OpInfo op_info) {
-  if (!attribute) {
-    throw("unexpected null attribute dictionary");
-  }
   num_results_ = num_results;
   num_operands_ = num_operands;
   attribute_ = attribute;
@@ -153,6 +161,15 @@ ir::OpResult Operation::GetResultByIndex(uint32_t index) {
   }
 }
 
+ir::OpOperand Operation::GetOperandByIndex(uint32_t index) {
+  if (index >= num_operands_) {
+    throw("index exceeds OP input range.");
+  }
+  char *ptr = reinterpret_cast<char *>(this) + sizeof(Operation) +
+              (index) * sizeof(detail::OpOperandImpl);
+  return ir::OpOperand(reinterpret_cast<detail::OpOperandImpl *>(ptr));
+}
+
 std::string Operation::print() {
   std::stringstream result;
   result << "{ " << num_results_ << " outputs, " << num_operands_
@@ -173,4 +190,9 @@ std::string Operation::print() {
   return result.str();
 }
 
+std::string Operation::op_name() const {
+  return op_info_.impl()->dialect()->name() + "." +
+         std::string(op_info_.impl()->name());
+}
+
 }  // namespace ir
diff --git a/paddle/ir/operation.h b/paddle/ir/operation.h
index a51043ad5687e565eda1eb12392729c58d87b872..9730244851fc415b6effcd629258b42ed70ff8d8 100644
--- a/paddle/ir/operation.h
+++ b/paddle/ir/operation.h
@@ -24,25 +24,35 @@ template <typename ConcreteTrait>
 class OpTraitBase;
 template <typename ConcreteInterface>
 class OpInterfaceBase;
+class Program;
+
+using AttributeMap = std::unordered_map<std::string, Attribute>;
 
 class alignas(8) Operation final {
  public:
   ///
   /// \brief Malloc memory and construct objects in the following order:
   /// OpResultImpls|Operation|OpOperandImpls.
+  /// NOTE: Like new and delete, create() and destroy() must be used in pairs.
   ///
   static Operation *create(const std::vector<ir::OpResult> &inputs,
                            const std::vector<ir::Type> &output_types,
-                           ir::DictionaryAttribute attribute,
+                           const AttributeMap &attribute,
                            ir::OpInfo op_info);
 
+  ///
+  /// \brief Destroy the operation objects and free the memory allocated by
+  /// create().
+  ///
+  void destroy();
 
   ir::OpResult GetResultByIndex(uint32_t index);
 
+  ir::OpOperand GetOperandByIndex(uint32_t index);
+
   std::string print();
 
-  ir::DictionaryAttribute attribute() const { return attribute_; }
+  const AttributeMap &attribute() const { return attribute_; }
 
   ir::OpInfo op_info() const { return op_info_; }
 
@@ -50,6 +60,8 @@ class alignas(8) Operation final {
 
   uint32_t num_operands() const { return num_operands_; }
 
+  std::string op_name() const;
+
   template <typename T>
   T dyn_cast() const {
     return CastUtil<T>::call(this);
   }
 
@@ -65,10 +77,16 @@ class alignas(8) Operation final {
     return op_info_.HasInterface<T>();
   }
 
+  Program *parent_program() const { return parent_program_; }
+
+  void set_parent_program(Program *parent_program) {
+    parent_program_ = parent_program;
+  }
+
  private:
   Operation(uint32_t num_results,
             uint32_t num_operands,
-            ir::DictionaryAttribute attribute,
+            const AttributeMap &attribute,
             ir::OpInfo op_info);
 
   template <typename T, typename Enabler = void>
@@ -92,13 +110,15 @@ class alignas(8) Operation final {
     }
   };
 
-  ir::DictionaryAttribute attribute_;
+  AttributeMap attribute_;
 
   ir::OpInfo op_info_;
 
   uint32_t num_results_ = 0;
 
   uint32_t num_operands_ = 0;
+
+  ir::Program *parent_program_{nullptr};
 };
 
 }  // namespace ir
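
With DictionaryAttribute gone, attributes travel as a plain unordered_map keyed by std::string. A sketch of the create/destroy contract (`ty` and `op_info` are assumed to come from a registered op, as in the tests at the end of the patch):

    ir::IrContext* ctx = ir::IrContext::Instance();
    ir::AttributeMap attrs;
    attrs.insert({"parameter_name", ir::StrAttribute::get(ctx, "a")});
    ir::Operation* op =
        ir::Operation::create(/*inputs=*/{}, {ty}, attrs, op_info);
    // Exactly one of the following must release the op:
    op->destroy();             // manual pairing with create(), or
    // program.InsertOp(op);   // transfer ownership to a Program
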
diff --git a/paddle/ir/parameter.h b/paddle/ir/parameter.h
new file mode 100644
index 0000000000000000000000000000000000000000..38d74ea3cbff2e5f3db2b8aaab5fdf26575a1b4a
--- /dev/null
+++ b/paddle/ir/parameter.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <cstdlib>
+#include <cstring>
+
+#include "paddle/ir/type.h"
+
+namespace ir {
+///
+/// \brief Parameter represents the weight in the calculation graph.
+///
+class Parameter {
+ public:
+  Parameter(void* data, size_t size, ir::Type type) {
+    data_ = malloc(size);
+    memcpy(data_, data, size);
+    size_ = size;
+    type_ = type;
+  }
+
+  Parameter(const Parameter& param) {
+    data_ = malloc(param.size_);
+    memcpy(data_, param.data_, param.size_);
+    size_ = param.size_;
+    type_ = param.type_;
+  }
+
+  Parameter& operator=(const Parameter& param) {
+    if (this == &param) {
+      return *this;
+    }
+    free(data_);
+    data_ = malloc(param.size_);
+    memcpy(data_, param.data_, param.size_);
+    size_ = param.size_;
+    type_ = param.type_;
+    return *this;
+  }
+
+  ~Parameter() { free(data_); }
+
+  Type type() const { return type_; }
+
+  void* data() const { return data_; }
+
+  bool is_mutable() const { return is_mutable_; }
+
+  void set_mutable() { is_mutable_ = true; }
+
+ private:
+  void* data_;
+
+  ///
+  /// \brief Number of bytes held in data_.
+  ///
+  size_t size_;
+
+  Type type_;
+
+  bool is_mutable_ = false;
+};
+
+}  // namespace ir
diff --git a/paddle/ir/program.cc b/paddle/ir/program.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4caa7a8051341891dc52480db37e1b7215ca88bc
--- /dev/null
+++ b/paddle/ir/program.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/ir/program.h"
+#include "paddle/ir/ir_context.h"
+
+namespace ir {
+Program::~Program() {
+  for (auto op : ops_) {
+    op->destroy();
+  }
+}
+
+void Program::InsertOp(Operation* op) {
+  ops_.push_back(op);
+  op->set_parent_program(this);
+}
+
+Parameter* Program::GetParameter(std::string name) const {
+  if (parameters_.count(name) != 0) {
+    return parameters_.at(name).get();
+  }
+  return nullptr;
+}
+
+void Program::SetParameter(std::string name,
+                           std::unique_ptr<Parameter>&& parameter) {
+  parameters_[name].reset(parameter.release());
+}
+
+}  // namespace ir
diff --git a/paddle/ir/program.h b/paddle/ir/program.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b0a54d77c3213114ec9d892fba17b7044537e11
--- /dev/null
+++ b/paddle/ir/program.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <list>
+#include <unordered_map>
+
+#include "paddle/ir/builtin_attribute.h"
+#include "paddle/ir/operation.h"
+#include "paddle/ir/parameter.h"
+
+namespace ir {
+///
+/// \brief Program is an abstraction of model structure, divided into
+/// computational graphs and weights. At the current stage, a computational
+/// graph is represented in the form of a list. TODO: a detailed design of
+/// control flow operators will follow, introducing concepts such as basic
+/// blocks, closures, and functions to improve Program's ability to represent
+/// computational graphs.
+///
+class Program {
+ public:
+  ~Program();
+
+  std::list<Operation*> ops() const { return ops_; }
+
+  size_t parameters_num() const { return parameters_.size(); }
+
+  ///
+  /// \brief Insert the Operation* constructed by Operation::create(...) into
+  /// this Program. NOTE: Ownership of the Operation* is transferred to this
+  /// Program, so the user must not call Operation::destroy() on it manually.
+  ///
+  void InsertOp(Operation* op);
+
+  Parameter* GetParameter(std::string name) const;
+
+  void SetParameter(std::string name, std::unique_ptr<Parameter>&& parameter);
+
+ private:
+  std::list<Operation*> ops_;  // owned
+
+  std::unordered_map<std::string, std::unique_ptr<Parameter>> parameters_;
+};
+
+}  // namespace ir
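
A minimal sketch of the ownership rules documented above (`ty`, `op_info`, and `parameter` are assumed to be built as in the earlier sketches):

    ir::Program program;
    program.InsertOp(ir::Operation::create({}, {ty}, {}, op_info));
    // parameter: std::unique_ptr<ir::Parameter>; the Program takes ownership.
    program.SetParameter("w", std::move(parameter));
    ir::Parameter* w = program.GetParameter("w");  // nullptr when absent
    // ~Program() calls destroy() on every inserted op and frees parameters.
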
diff --git a/paddle/ir/type.h b/paddle/ir/type.h
index e44157365744bbcdd4517f06c0afa0d221549458..37ceb39a687fea858c4e1fedf50c4715fd7374fc 100644
--- a/paddle/ir/type.h
+++ b/paddle/ir/type.h
@@ -57,7 +57,7 @@ class Type {
 
   const Storage *storage() const { return storage_; }
 
-  const Dialect &dialect() const { return storage_->abstract_type().dialect(); }
+  Dialect &dialect() const { return storage_->abstract_type().dialect(); }
 
   IrContext *ir_context() const;
 
diff --git a/paddle/ir/type_base.h b/paddle/ir/type_base.h
index 0757810e9b7466de323313c97a0a743cd78e78cd..c78cd54c9b392da49355be558e0556a0d70b1f80 100644
--- a/paddle/ir/type_base.h
+++ b/paddle/ir/type_base.h
@@ -62,7 +62,7 @@ class AbstractType {
   ///
   /// \return The dialect this type was registered to.
   ///
-  const Dialect &dialect() const { return dialect_; }
+  Dialect &dialect() const { return const_cast<Dialect &>(dialect_); }
 
   ///
   /// \brief Find the AbstractType instance whose TypeId is type_id from
diff --git a/paddle/ir/value.cc b/paddle/ir/value.cc
index 2f1d5e5616f41d76f9c6b89291d809f70b3009c3..5c066902e303901f66de83a6d2779b4350c3e4c5 100644
--- a/paddle/ir/value.cc
+++ b/paddle/ir/value.cc
@@ -106,6 +106,10 @@ ir::Operation *OpOperandImpl::owner() const { return owner_; }
 
 ir::detail::OpOperandImpl *OpOperandImpl::next_use() { return next_use_; }
 
+ir::Value OpOperandImpl::source() const { return source_; }
+
+void OpOperandImpl::release_source() { source_ = nullptr; }
+
 OpOperandImpl::OpOperandImpl(ir::Value source, ir::Operation *owner)
     : source_(source), owner_(owner) {
   prev_use_addr_ = source.impl()->first_use_addr();
diff --git a/paddle/ir/value_impl.h b/paddle/ir/value_impl.h
index 2fa236dddd83302dc9b046edbe46a3f438e57627..145c937b0cf009e72f8da0c363f448db84d357c4 100644
--- a/paddle/ir/value_impl.h
+++ b/paddle/ir/value_impl.h
@@ -31,6 +31,10 @@ class OpOperandImpl {
 
   ir::detail::OpOperandImpl *next_use();
 
+  ir::Value source() const;
+
+  void release_source();
+
   /// Remove this operand from the current use list.
   void remove_from_ud_chain();
 
diff --git a/test/cpp/ir/CMakeLists.txt b/test/cpp/ir/CMakeLists.txt
index 9d1d9b9d42b752f1eaaa667bfae26d8935a63ea0..0c5385a32fbfadc79959fd6de9a09d131ea0297e 100644
--- a/test/cpp/ir/CMakeLists.txt
+++ b/test/cpp/ir/CMakeLists.txt
@@ -3,4 +3,13 @@ if(WITH_NEWIR)
   cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS new_ir gtest)
   cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS new_ir gtest)
   cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS new_ir gtest)
+  cc_test_old(
+    ir_program_test
+    SRCS
+    ir_program_test.cc
+    DEPS
+    new_ir
+    pd_dialect
+    phi
+    gtest)
 endif()
diff --git a/test/cpp/ir/ir_attribute_test.cc b/test/cpp/ir/ir_attribute_test.cc
index f4949cd30fb14734c8f377262c988ddcaab92871..8aec0bec4b64dfbcf398f531a30d6f9d966bf200 100644
--- a/test/cpp/ir/ir_attribute_test.cc
+++ b/test/cpp/ir/ir_attribute_test.cc
@@ -58,34 +58,3 @@ TEST(attribute_test, built_in_attribute) {
   EXPECT_EQ(string_attr_cast_1.isa<ir::StrAttribute>(), true);
   EXPECT_EQ(string_attr_cast_1.size() == 8, 1);
 }
-
-TEST(attribute_test, dictionary_attribute) {
-  ir::IrContext *ctx = ir::IrContext::Instance();
-  std::string str_attr1_name = "attr1_name";
-  std::string str_attr1_value = "attr1_value";
-  ir::StrAttribute attr1_name = ir::StrAttribute::get(ctx, str_attr1_name);
-  ir::Attribute attr1_value = ir::StrAttribute::get(ctx, str_attr1_value);
-  std::string str_attr2_name = "attr2_name";
-  std::string str_attr2_value = "attr2_value";
-  ir::StrAttribute attr2_name = ir::StrAttribute::get(ctx, str_attr2_name);
-  ir::Attribute attr2_value = ir::StrAttribute::get(ctx, str_attr2_value);
-
-  std::map<ir::StrAttribute, ir::Attribute> named_attr1;
-  named_attr1.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr1_name, attr1_value));
-  named_attr1.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr2_name, attr2_value));
-  ir::DictionaryAttribute dic_attr1 =
-      ir::DictionaryAttribute::get(ctx, named_attr1);
-  std::map<ir::StrAttribute, ir::Attribute> named_attr2;
-  named_attr2.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr2_name, attr2_value));
-  named_attr2.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr1_name, attr1_value));
-  ir::DictionaryAttribute dic_attr2 =
-      ir::DictionaryAttribute::get(ctx, named_attr2);
-
-  EXPECT_EQ(dic_attr1, dic_attr2);
-  EXPECT_EQ(attr1_value, dic_attr1.GetValue(attr1_name));
-  EXPECT_EQ(attr2_value, dic_attr1.GetValue(attr2_name));
-}
diff --git a/test/cpp/ir/ir_op_test.cc b/test/cpp/ir/ir_op_test.cc
index 342eb5330e504b0d11aabe5abd795cb53cbb85c3..f6fbff5dd3b0ef4717388df53b3a091947cb7af5 100644
--- a/test/cpp/ir/ir_op_test.cc
+++ b/test/cpp/ir/ir_op_test.cc
@@ -99,15 +99,14 @@ class TestDialect : public ir::Dialect {
   void initialize() { RegisterOps<Operation1, Operation2>(); }
 };
 
-ir::DictionaryAttribute CreateAttribute(std::string attribute_name,
-                                        std::string attribute) {
+ir::AttributeMap CreateAttributeMap(std::string attribute_name,
+                                    std::string attribute) {
   ir::IrContext *ctx = ir::IrContext::Instance();
-  ir::StrAttribute attr_name = ir::StrAttribute::get(ctx, attribute_name);
   ir::Attribute attr_value = ir::StrAttribute::get(ctx, attribute);
-  std::map<ir::StrAttribute, ir::Attribute> named_attr;
-  named_attr.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr_name, attr_value));
-  return ir::DictionaryAttribute::get(ctx, named_attr);
+  ir::AttributeMap attr_map;
+  attr_map.insert(
+      std::pair<std::string, ir::Attribute>(attribute_name, attr_value));
+  return attr_map;
 }
 
 TEST(op_test, op_test) {
@@ -137,7 +136,7 @@ TEST(op_test, op_test) {
   ir::Operation *op =
       ir::Operation::create(op_inputs,
                             op_output_types,
-                            CreateAttribute("op1_name", "op1_attr"),
+                            CreateAttributeMap("op1_name", "op1_attr"),
                             op2_info);
 
   if (op->HasTrait<ReadOnlyTrait>()) {
diff --git a/test/cpp/ir/ir_program_test.cc b/test/cpp/ir/ir_program_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..91e0fbf9bf4f89a12929efb17086bbcfaf0cc464
--- /dev/null
+++ b/test/cpp/ir/ir_program_test.cc
@@ -0,0 +1,206 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "paddle/fluid/dialect/pd_dialect.h"
+#include "paddle/fluid/dialect/pd_type.h"
+#include "paddle/fluid/dialect/utils.h"
+#include "paddle/ir/builtin_attribute.h"
+#include "paddle/ir/builtin_dialect.h"
+#include "paddle/ir/builtin_op.h"
+#include "paddle/ir/builtin_type.h"
+#include "paddle/ir/ir_context.h"
+#include "paddle/ir/program.h"
+#include "paddle/ir/utils.h"
+#include "paddle/phi/core/meta_tensor.h"
+#include "paddle/phi/infermeta/binary.h"
+#include "paddle/phi/kernels/elementwise_add_kernel.h"
+
+class AddOp : public ir::Op<AddOp> {
+ public:
+  using Op::Op;
+  static const char *name() { return "Add"; }
+  static const char **attributes_name_;
+  static uint32_t attributes_num() { return 0; }
+};
+const char **AddOp::attributes_name_ = nullptr;
+
+TEST(program_test, program) {
+  // (1) Init environment.
+  ir::IrContext *ctx = ir::IrContext::Instance();
+  ir::Dialect *builtin_dialect =
+      ctx->GetOrRegisterDialect<ir::BuiltinDialect>();
+  builtin_dialect->RegisterOp<AddOp>();
+  ir::Dialect *paddle_dialect =
+      ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
+
+  // (2) Create an empty program object
+  ir::Program program;
+  EXPECT_EQ(program.ops().size() == 0, true);
+
+  // (3) Create a float32 DenseTensor Parameter and save into Program
+  ir::Type fp32_dtype = ir::Float32Type::get(ctx);
+  paddle::dialect::DenseTensorTypeStorage::Dim dims = {2, 2};
+  paddle::dialect::DenseTensorTypeStorage::DataLayout data_layout =
+      paddle::dialect::DenseTensorTypeStorage::DataLayout::NCHW;
+  paddle::dialect::DenseTensorTypeStorage::LoD lod = {{0, 1, 2}};
+  size_t offset = 0;
+  ir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get(
+      ctx, fp32_dtype, dims, data_layout, lod, offset);
+
+  std::vector<float> data_a = {1, 2, 3, 4};
+  std::unique_ptr<ir::Parameter> parameter_a =
+      std::make_unique<ir::Parameter>(reinterpret_cast<void *>(data_a.data()),
+                                      4 * sizeof(float),
+                                      dense_tensor_dtype);
+  program.SetParameter("a", std::move(parameter_a));
+  EXPECT_EQ(program.parameters_num() == 1, true);
+
+  std::vector<float> data_b = {5, 6, 7, 8};
+  std::unique_ptr<ir::Parameter> parameter_b =
+      std::make_unique<ir::Parameter>(reinterpret_cast<void *>(data_b.data()),
+                                      4 * sizeof(float),
+                                      dense_tensor_dtype);
+  program.SetParameter("b", std::move(parameter_b));
+  EXPECT_EQ(program.parameters_num() == 2, true);
+
+  // (4) Def a = GetParameterOp("a"), and create DenseTensor for a.
+  std::string op1_name =
+      builtin_dialect->name() + "." + std::string(ir::GetParameterOp::name());
+  ir::OpInfoImpl *op1_info = ctx->GetRegisteredOpInfo(op1_name);
+  std::unordered_map<std::string, ir::Attribute> op1_attribute{
+      {"parameter_name", ir::StrAttribute::get(ctx, "a")}};
+  ir::Operation *op1 =
+      ir::Operation::create({}, {dense_tensor_dtype}, op1_attribute, op1_info);
+
+  program.InsertOp(op1);
+
+  EXPECT_EQ(op1->GetResultByIndex(0).type().dialect().id(),
+            paddle_dialect->id());
+  using Interface = paddle::dialect::ParameterConvertInterface;
+  Interface *a_interface = op1->GetResultByIndex(0)
+                               .type()
+                               .dialect()
+                               .GetRegisteredInterface<Interface>();
+  std::shared_ptr<paddle::framework::Variable> a_var =
+      a_interface->ParameterToVariable(program.GetParameter("a"));
+  const phi::DenseTensor &a_tensor = a_var->Get<phi::DenseTensor>();
+  EXPECT_EQ(a_tensor.numel(), 4);
+  EXPECT_EQ(a_tensor.dims(), phi::DDim(dims.data(), dims.size()));
+  EXPECT_EQ(a_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
+  EXPECT_EQ(a_tensor.layout(),
+            paddle::dialect::TransToPhiDataLayout(data_layout));
+  EXPECT_EQ(a_tensor.lod(), lod);
+  EXPECT_EQ(a_tensor.offset(), offset);
+  for (int64_t i = 0; i < a_tensor.numel(); i++) {
+    EXPECT_EQ(*(a_tensor.data<float>() + i), data_a[i]);
+  }
+
+  // (5) Def b = GetParameterOp("b"), and create DenseTensor for b.
+  std::string op2_name =
+      builtin_dialect->name() + "." + std::string(ir::GetParameterOp::name());
+  ir::OpInfoImpl *op2_info = ctx->GetRegisteredOpInfo(op2_name);
+  std::unordered_map<std::string, ir::Attribute> op2_attribute{
+      {"parameter_name", ir::StrAttribute::get(ctx, "b")}};
+  ir::Operation *op2 =
+      ir::Operation::create({}, {dense_tensor_dtype}, op2_attribute, op2_info);
+  program.InsertOp(op2);
+
+  EXPECT_EQ(op2->GetResultByIndex(0).type().dialect().id(),
+            paddle_dialect->id());
+  Interface *b_interface = op2->GetResultByIndex(0)
+                               .type()
+                               .dialect()
+                               .GetRegisteredInterface<Interface>();
+  std::shared_ptr<paddle::framework::Variable> b_var =
+      b_interface->ParameterToVariable(program.GetParameter("b"));
+  const phi::DenseTensor &b_tensor = b_var->Get<phi::DenseTensor>();
+  EXPECT_EQ(b_tensor.numel(), 4);
+  EXPECT_EQ(b_tensor.dims(), phi::DDim(dims.data(), dims.size()));
+  EXPECT_EQ(b_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
+  EXPECT_EQ(b_tensor.layout(),
+            paddle::dialect::TransToPhiDataLayout(data_layout));
+  EXPECT_EQ(b_tensor.lod(), lod);
+  EXPECT_EQ(b_tensor.offset(), offset);
+  for (int64_t i = 0; i < b_tensor.numel(); i++) {
+    EXPECT_EQ(*(b_tensor.data<float>() + i), data_b[i]);
+  }
+
+  // (6) Def c = AddOp(a, b), execute this op.
+  std::string op3_name =
+      builtin_dialect->name() + "." + std::string(AddOp::name());
+  ir::OpInfoImpl *op3_info = ctx->GetRegisteredOpInfo(op3_name);
+  std::unordered_map<std::string, ir::Attribute> op3_attribute;
+  ir::Operation *op3 = ir::Operation::create(
+      {op1->GetResultByIndex(0), op2->GetResultByIndex(0)},
+      {dense_tensor_dtype},
+      op3_attribute,
+      op3_info);
+  program.InsertOp(op3);
+
+  phi::CPUContext *dev_ctx = static_cast<phi::CPUContext *>(
+      paddle::platform::DeviceContextPool::Instance().Get(
+          paddle::platform::CPUPlace()));
+  phi::DenseTensor c_tensor =
+      phi::Add<float>(*dev_ctx, a_tensor, b_tensor);
+  std::shared_ptr<paddle::framework::Variable> variable_c =
+      std::make_shared<paddle::framework::Variable>();
+  auto *dst_tensor = variable_c->GetMutable<phi::DenseTensor>();
+  *dst_tensor = c_tensor;
+  EXPECT_EQ(dst_tensor->numel(), b_tensor.numel());
+  EXPECT_EQ(dst_tensor->dims(), b_tensor.dims());
+  EXPECT_EQ(dst_tensor->dtype(), b_tensor.dtype());
+  EXPECT_EQ(dst_tensor->layout(), b_tensor.layout());
+  EXPECT_EQ(dst_tensor->lod(), b_tensor.lod());
+  EXPECT_EQ(dst_tensor->offset(), b_tensor.offset());
+  for (int64_t i = 0; i < dst_tensor->numel(); i++) {
+    EXPECT_EQ(*(dst_tensor->data<float>() + i), data_a[i] + data_b[i]);
+  }
+
+  // (7) Def SetParameterOp(c, "c")
+  std::string op4_name =
+      builtin_dialect->name() + "." + std::string(ir::SetParameterOp::name());
+  ir::OpInfoImpl *op4_info = ctx->GetRegisteredOpInfo(op4_name);
+  std::unordered_map<std::string, ir::Attribute> op4_attribute{
+      {"parameter_name", ir::StrAttribute::get(ctx, "c")}};
+  ir::Operation *op4 = ir::Operation::create(
+      {op3->GetResultByIndex(0)}, {}, op4_attribute, op4_info);
+  program.InsertOp(op4);
+
+  EXPECT_EQ(op4->GetOperandByIndex(0).impl()->source().type().dialect().id(),
+            paddle_dialect->id());
+  Interface *c_interface = op4->GetOperandByIndex(0)
+                               .impl()
+                               ->source()
+                               .type()
+                               .dialect()
+                               .GetRegisteredInterface<Interface>();
+  std::unique_ptr<ir::Parameter> parameter_c =
+      c_interface->VariableToParameter(variable_c.get());
+  EXPECT_EQ(parameter_c->type(), dense_tensor_dtype);
+  for (int64_t i = 0; i < dst_tensor->numel(); i++) {
+    EXPECT_EQ(*(dst_tensor->data<float>() + i),
+              *(static_cast<float *>(parameter_c->data()) + i));
+  }
+  program.SetParameter("c", std::move(parameter_c));
+
+  // (8) Traverse Program
+  std::list<ir::Operation *> ops = program.ops();
+  EXPECT_EQ(ops.size() == 4, true);
+  EXPECT_EQ(program.parameters_num() == 3, true);
+}
diff --git a/test/cpp/ir/ir_value_test.cc b/test/cpp/ir/ir_value_test.cc
index 102619905fe7b7dc729c86c7e2e285079bcc67af..e0f0d83312a8903784c371831ef8361d07c80207 100644
--- a/test/cpp/ir/ir_value_test.cc
+++ b/test/cpp/ir/ir_value_test.cc
@@ -23,16 +23,14 @@
 // This unittest is used to test the construction interfaces of value class and
 // operation.
 // The constructed test scenario is: a = OP1(); b = OP2(); c = OP3(a,
 // b); d, e, f, g, h, i, j = OP4(a, c);
-
-ir::DictionaryAttribute CreateAttribute(std::string attribute_name,
-                                        std::string attribute) {
+ir::AttributeMap CreateAttributeMap(std::string attribute_name,
+                                    std::string attribute) {
   ir::IrContext *ctx = ir::IrContext::Instance();
-  ir::StrAttribute attr_name = ir::StrAttribute::get(ctx, attribute_name);
   ir::Attribute attr_value = ir::StrAttribute::get(ctx, attribute);
-  std::map<ir::StrAttribute, ir::Attribute> named_attr;
-  named_attr.insert(
-      std::pair<ir::StrAttribute, ir::Attribute>(attr_name, attr_value));
-  return ir::DictionaryAttribute::get(ctx, named_attr);
+  ir::AttributeMap attr_map;
+  attr_map.insert(
+      std::pair<std::string, ir::Attribute>(attribute_name, attr_value));
+  return attr_map;
 }
 
 TEST(value_test, value_test) {
@@ -43,7 +41,7 @@ TEST(value_test, value_test) {
   ir::Operation *op1 =
       ir::Operation::create(op1_inputs,
                             op1_output_types,
-                            CreateAttribute("op1_name", "op1_attr"),
+                            CreateAttributeMap("op1_name", "op1_attr"),
                             nullptr);
   std::cout << op1->print() << std::endl;
   // 2. Construct OP2: b = OP2();
@@ -52,7 +50,7 @@ TEST(value_test, value_test) {
   ir::Operation *op2 =
       ir::Operation::create(op2_inputs,
                             op2_output_types,
-                            CreateAttribute("op2_name", "op2_attr"),
+                            CreateAttributeMap("op2_name", "op2_attr"),
                             nullptr);
   std::cout << op2->print() << std::endl;
   // 3. Construct OP3: c = OP3(a, b);
@@ -62,7 +60,7 @@ TEST(value_test, value_test) {
   ir::Operation *op3 =
       ir::Operation::create(op3_inputs,
                             op3_output_types,
-                            CreateAttribute("op3_name", "op3_attr"),
+                            CreateAttributeMap("op3_name", "op3_attr"),
                             nullptr);
   std::cout << op3->print() << std::endl;
   // 4. Construct OP4: d, e, f, g, h, i, j = OP4(a, c);
@@ -75,7 +73,7 @@ TEST(value_test, value_test) {
   ir::Operation *op4 =
       ir::Operation::create(op4_inputs,
                             op4_output_types,
-                            CreateAttribute("op4_name", "op4_attr"),
+                            CreateAttributeMap("op4_name", "op4_attr"),
                             nullptr);
   std::cout << op4->print() << std::endl;
 
diff --git a/test/cpp/ir/type_test.cc b/test/cpp/ir/type_test.cc
index d21afdcb80a59f324c6410d1643a140e025bac20..8613c9d6afa67a6212305076ea1fc703edcad01c 100644
--- a/test/cpp/ir/type_test.cc
+++ b/test/cpp/ir/type_test.cc
@@ -124,46 +124,10 @@ TEST(type_test, built_in_type) {
             &ir::AbstractType::lookup(int64_1.type_id(), ctx));
   EXPECT_EQ(ir::Int64Type::classof(int64_1), 1);
 
-  // Test 2: Test the parameteric built-in type of IrContext.
-  ir::DenseTensorTypeStorage::Dim dims = {1, 2, 3};
-  ir::DenseTensorTypeStorage::DataLayout data_layout =
-      ir::DenseTensorTypeStorage::DataLayout::NCHW;
-  ir::DenseTensorTypeStorage::LoD lod = {{1, 2, 3}, {4, 5, 6}};
-  size_t offset = 0;
-
-  ir::Type dense_tensor_1 =
-      ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, offset);
-  ir::Type dense_tensor_2 =
-      ir::DenseTensorType::get(ctx, fp32_2, dims, data_layout, lod, offset);
-  ir::Type dense_tensor_3 =
-      ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, 2);
-
-  EXPECT_EQ(dense_tensor_1, dense_tensor_2);
-  EXPECT_NE(dense_tensor_1, dense_tensor_3);
-  EXPECT_EQ(dense_tensor_1.type_id(), dense_tensor_2.type_id());
-  EXPECT_EQ(ir::DenseTensorType::classof(dense_tensor_1), 1);
-
-  ir::DenseTensorType dense_tensor_4 =
-      ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, 2);
-  EXPECT_EQ(dense_tensor_4.offset() == 2, 1);
-  EXPECT_EQ(dense_tensor_4.dtype().isa<ir::Float32Type>(), true);
-  EXPECT_EQ(dense_tensor_4.data_layout(), data_layout);
-
-  // Test 3: Test isa and dyn_cast.
+  // Test 2: Test isa and dyn_cast.
   EXPECT_EQ(fp16_1.isa<ir::Float16Type>(), true);
   EXPECT_EQ(fp16_1.isa<ir::Float32Type>(), false);
-  EXPECT_EQ(fp16_1.isa<ir::DenseTensorType>(), false);
   EXPECT_EQ(fp16_1.isa<ir::Type>(), true);
-  EXPECT_EQ(dense_tensor_1.isa<ir::DenseTensorType>(), true);
-
-  ir::DenseTensorType dense_tensor_cast_1 =
-      dense_tensor_1.dyn_cast<ir::DenseTensorType>();
-  EXPECT_EQ(dense_tensor_cast_1.isa<ir::DenseTensorType>(), true);
-  EXPECT_EQ(dense_tensor_cast_1.offset() == 0, 1);
-  const ir::DenseTensorType dense_tensor_cast_2 =
-      ir::dyn_cast<ir::DenseTensorType>(dense_tensor_1);
-  EXPECT_EQ(dense_tensor_cast_2.isa<ir::DenseTensorType>(), true);
-  EXPECT_EQ(dense_tensor_cast_2.offset() == 0, 1);
 }
 
 // Customize a parameterized TypeStorage IntegerTypeStorage.