Unverified Commit 78967ad2 authored by zhangbo9674, committed by GitHub

[IR] Program & Parameter & PaddleDialect (#53557)

* add program parameter dialect_interface

* fix op create bug

* add ir parameter convert pd variable methods

* refine code

* fix bug

* refine by ut

* refine ut

* delete unused code

* refine code

* refine code by comment

* reset WITH_NEW_IR

* refine op attribute map

* refine program and op create

* refine program and op create
Parent 56e8affe
@@ -8,5 +8,8 @@ add_subdirectory(pybind)
add_subdirectory(eager)
add_subdirectory(prim)
add_subdirectory(jit)
if(WITH_NEWIR)
add_subdirectory(dialect)
endif()
# NOTE: please add subdirectory inference at last.
add_subdirectory(inference)
set(PD_DIALECT_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/fluid/dialect")
set(PD_DIALECT_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/fluid/dialect")
file(GLOB PD_DIALECT_SRCS "*.cc")
cc_library(
pd_dialect
SRCS ${PD_DIALECT_SRCS}
DEPS new_ir framework_proto dense_tensor)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/dialect/pd_dialect.h"
#include "paddle/fluid/dialect/pd_type.h"
#include "paddle/fluid/dialect/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/ir/builtin_type.h"
#include "paddle/ir/dialect_interface.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace dialect {
std::shared_ptr<paddle::framework::Variable>
ParameterConvertInterface::ParameterToVariable(ir::Parameter* parameter) {
if (parameter->type().isa<DenseTensorType>()) {
VLOG(4) << "Convert a DenseTensor Parameter to a variable.";
std::shared_ptr<paddle::framework::Variable> var =
std::make_shared<paddle::framework::Variable>();
phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
// Init DenseTensor
auto dim = parameter->type().dyn_cast<DenseTensorType>().dim();
phi::DenseTensorMeta meta(
TransToPhiDataType(
parameter->type().dyn_cast<DenseTensorType>().dtype()),
phi::DDim(dim.data(), dim.size()),
TransToPhiDataLayout(
parameter->type().dyn_cast<DenseTensorType>().data_layout()),
parameter->type().dyn_cast<DenseTensorType>().lod(),
parameter->type().dyn_cast<DenseTensorType>().offset());
tensor->set_meta(meta);
paddle::platform::DeviceContext* dev_ctx =
paddle::platform::DeviceContextPool::Instance().Get(
paddle::platform::CPUPlace());
dev_ctx->Alloc(tensor,
TransToPhiDataType(
parameter->type().dyn_cast<DenseTensorType>().dtype()));
memcpy(tensor->data(),
parameter->data(),
tensor->numel() * phi::SizeOf(tensor->dtype()));
return var;
} else {
return nullptr;
}
}
std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
paddle::framework::Variable* var) {
if (var->IsType<phi::DenseTensor>()) {
phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
// Get Meta
ir::IrContext* ctx = ir::IrContext::Instance();
ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx);
DenseTensorTypeStorage::Dim dims(tensor->dims().size());
std::copy(tensor->dims().Get(),
tensor->dims().Get() + tensor->dims().size(),
dims.data());
DenseTensorTypeStorage::DataLayout data_layout =
TransToIrDataLayout(tensor->layout());
DenseTensorTypeStorage::LoD lod = tensor->lod();
size_t offset = tensor->meta().offset;
void* data = tensor->data();
ir::Type dense_tensor_type =
DenseTensorType::get(ctx, data_type, dims, data_layout, lod, offset);
return std::make_unique<ir::Parameter>(
data,
tensor->numel() * phi::SizeOf(tensor->dtype()),
dense_tensor_type);
} else {
return nullptr;
}
}
PaddleDialect::PaddleDialect(ir::IrContext* context)
: ir::Dialect(name(), context, ir::TypeId::get<PaddleDialect>()) {
initialize();
}
void PaddleDialect::initialize() {
RegisterTypes<GET_PADDLE_TYPE_LIST>();
RegisterInterfaces<ParameterConvertInterface>();
}
} // namespace dialect
} // namespace paddle
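For orientation, a minimal usage sketch of the interface above (not part of this commit; it assumes an ir::Program named program that already holds a parameter "a", mirroring ir_program_test.cc further down):

// Hypothetical round trip: ir::Parameter <-> paddle::framework::Variable.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Dialect *pd_dialect =
    ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
auto *iface = pd_dialect->GetRegisteredInterface<
    paddle::dialect::ParameterConvertInterface>();
// Parameter -> Variable (only CPU variables are supported at this stage).
std::shared_ptr<paddle::framework::Variable> var =
    iface->ParameterToVariable(program.GetParameter("a"));
// Variable -> Parameter (copies the tensor bytes into the parameter).
std::unique_ptr<ir::Parameter> param = iface->VariableToParameter(var.get());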
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/variable.h"
#include "paddle/ir/dialect.h"
#include "paddle/ir/parameter.h"
namespace paddle {
namespace dialect {
class ParameterConvertInterface
: public ir::DialectInterface::Base<ParameterConvertInterface> {
public:
explicit ParameterConvertInterface(ir::Dialect* dialect) : Base(dialect) {}
// NOTE(zhangbo): Only supports creating a new CPU Variable.
std::shared_ptr<paddle::framework::Variable> ParameterToVariable(
ir::Parameter* parameter);
std::unique_ptr<ir::Parameter> VariableToParameter(
paddle::framework::Variable* var);
};
class PaddleDialect : public ir::Dialect {
public:
explicit PaddleDialect(ir::IrContext* context);
static const char* name() { return "pd"; }
private:
void initialize();
};
} // namespace dialect
} // namespace paddle
@@ -12,23 +12,28 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/ir/builtin_type.h"
#include "paddle/fluid/dialect/pd_type.h"
namespace ir {
namespace paddle {
namespace dialect {
const ir::Type& DenseTensorType::dtype() const { return storage()->dtype_; }
const ir::DenseTensorTypeStorage::Dim& DenseTensorType::dim() const {
const paddle::dialect::DenseTensorTypeStorage::Dim& DenseTensorType::dim()
const {
return storage()->dims_;
}
const ir::DenseTensorTypeStorage::DataLayout& DenseTensorType::data_layout()
const {
const paddle::dialect::DenseTensorTypeStorage::DataLayout&
DenseTensorType::data_layout() const {
return storage()->layout_;
}
const ir::DenseTensorTypeStorage::LoD& DenseTensorType::lod() const {
const paddle::dialect::DenseTensorTypeStorage::LoD& DenseTensorType::lod()
const {
return storage()->lod_;
}
const size_t& DenseTensorType::offset() const { return storage()->offset_; }
} // namespace ir
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/dialect/pd_type_storage.h"
#include "paddle/ir/type.h"
namespace paddle {
namespace dialect {
#define GET_PADDLE_TYPE_LIST paddle::dialect::DenseTensorType
///
/// \brief Define built-in parametric types.
///
class DenseTensorType : public ir::Type {
public:
using Type::Type;
DECLARE_TYPE_UTILITY_FUNCTOR(DenseTensorType, DenseTensorTypeStorage);
const ir::Type &dtype() const;
const paddle::dialect::DenseTensorTypeStorage::Dim &dim() const;
const paddle::dialect::DenseTensorTypeStorage::DataLayout &data_layout()
const;
const paddle::dialect::DenseTensorTypeStorage::LoD &lod() const;
const size_t &offset() const;
};
} // namespace dialect
} // namespace paddle
@@ -36,7 +36,8 @@ struct hash<std::vector<T>> {
} // namespace std
namespace ir {
namespace paddle {
namespace dialect {
///
/// \brief Define parametric TypeStorage for DenseTensorType.
///
@@ -151,4 +152,5 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
size_t offset_;
};
} // namespace ir
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/dialect/pd_type_storage.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/ir/builtin_type.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace dialect {
// TODO(zhangbo): The builtin type needs to cover all data types of
// phi::DataType.
inline phi::DataType TransToPhiDataType(ir::Type dtype) {
if (dtype.isa<ir::Float16Type>()) {
return phi::DataType::FLOAT16;
} else if (dtype.isa<ir::Float32Type>()) {
return phi::DataType::FLOAT32;
} else if (dtype.isa<ir::Float64Type>()) {
return phi::DataType::FLOAT64;
} else if (dtype.isa<ir::Int16Type>()) {
return phi::DataType::INT16;
} else if (dtype.isa<ir::Int32Type>()) {
return phi::DataType::INT32;
} else if (dtype.isa<ir::Int64Type>()) {
return phi::DataType::INT64;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported ir data type when casting it into "
"phi data type."));
}
}
inline ir::Type TransToIrDataType(phi::DataType dtype,
ir::IrContext *ctx = nullptr) {
if (ctx == nullptr) {
ctx = ir::IrContext::Instance();
}
switch (dtype) {
case phi::DataType::FLOAT16:
return ir::Float16Type::get(ctx);
case phi::DataType::FLOAT32:
return ir::Float32Type::get(ctx);
case phi::DataType::FLOAT64:
return ir::Float64Type::get(ctx);
case phi::DataType::INT16:
return ir::Int16Type::get(ctx);
case phi::DataType::INT32:
return ir::Int32Type::get(ctx);
case phi::DataType::INT64:
return ir::Int64Type::get(ctx);
default:
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported phi data type `%s` when casting it into "
"ir data type.",
dtype));
}
}
inline phi::DataLayout TransToPhiDataLayout(
DenseTensorTypeStorage::DataLayout data_layout) {
switch (data_layout) {
case DenseTensorTypeStorage::DataLayout::NHWC:
return phi::DataLayout::NHWC;
case DenseTensorTypeStorage::DataLayout::NCHW:
return phi::DataLayout::NCHW;
case DenseTensorTypeStorage::DataLayout::NCDHW:
return phi::DataLayout::NCDHW;
case DenseTensorTypeStorage::DataLayout::NDHWC:
return phi::DataLayout::NDHWC;
case DenseTensorTypeStorage::DataLayout::ONEDNN:
return phi::DataLayout::ONEDNN;
case DenseTensorTypeStorage::DataLayout::SPARSE_COO:
return phi::DataLayout::SPARSE_COO;
case DenseTensorTypeStorage::DataLayout::SPARSE_CSR:
return phi::DataLayout::SPARSE_CSR;
case DenseTensorTypeStorage::DataLayout::PSTRING_UNION:
return phi::DataLayout::PSTRING_UNION;
case DenseTensorTypeStorage::DataLayout::NUM_DATA_LAYOUTS:
return phi::DataLayout::NUM_DATA_LAYOUTS;
case DenseTensorTypeStorage::DataLayout::ALL_LAYOUT:
return phi::DataLayout::ALL_LAYOUT;
default:
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported ir data layout `%s` when casting it into "
"phi data type.",
static_cast<int>(data_layout)));
}
}
inline DenseTensorTypeStorage::DataLayout TransToIrDataLayout(
phi::DataLayout data_layout) {
switch (data_layout) {
case phi::DataLayout::NHWC:
return DenseTensorTypeStorage::DataLayout::NHWC;
case phi::DataLayout::NCHW:
return DenseTensorTypeStorage::DataLayout::NCHW;
case phi::DataLayout::NCDHW:
return DenseTensorTypeStorage::DataLayout::NCDHW;
case phi::DataLayout::NDHWC:
return DenseTensorTypeStorage::DataLayout::NDHWC;
case phi::DataLayout::ONEDNN:
return DenseTensorTypeStorage::DataLayout::ONEDNN;
case phi::DataLayout::SPARSE_COO:
return DenseTensorTypeStorage::DataLayout::SPARSE_COO;
case phi::DataLayout::SPARSE_CSR:
return DenseTensorTypeStorage::DataLayout::SPARSE_CSR;
case phi::DataLayout::PSTRING_UNION:
return DenseTensorTypeStorage::DataLayout::PSTRING_UNION;
case phi::DataLayout::NUM_DATA_LAYOUTS:
return DenseTensorTypeStorage::DataLayout::NUM_DATA_LAYOUTS;
case phi::DataLayout::ALL_LAYOUT:
return DenseTensorTypeStorage::DataLayout::ALL_LAYOUT;
default:
PADDLE_THROW(phi::errors::Unimplemented(
"Unsupported phi data layout `%s` when casting it into "
"ir data type.",
static_cast<int>(data_layout)));
}
}
} // namespace dialect
} // namespace paddle
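A quick sanity sketch of the helpers above (illustrative only; the conversions round-trip for every dtype and layout currently covered):

// Hypothetical round-trip check for the conversion helpers.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Type fp32 =
    paddle::dialect::TransToIrDataType(phi::DataType::FLOAT32, ctx);
assert(paddle::dialect::TransToPhiDataType(fp32) == phi::DataType::FLOAT32);
auto nchw = paddle::dialect::TransToIrDataLayout(phi::DataLayout::NCHW);
assert(paddle::dialect::TransToPhiDataLayout(nchw) == phi::DataLayout::NCHW);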
@@ -19,24 +19,4 @@ std::string StrAttribute::data() const { return storage()->GetAsKey(); }
uint32_t StrAttribute::size() const { return storage()->GetAsKey().size(); }
NamedAttribute::NamedAttribute(StrAttribute name, Attribute value)
: name_(name), value_(value) {}
bool NamedAttribute::operator<(const NamedAttribute &right) const {
return name() < right.name();
}
bool NamedAttribute::operator==(const NamedAttribute &right) const {
return name() == right.name() && value() == right.value();
}
bool NamedAttribute::operator!=(const NamedAttribute &right) const {
return !(*this == right);
}
Attribute DictionaryAttribute::GetValue(const StrAttribute &name) {
return storage()->GetValue(name);
}
uint32_t DictionaryAttribute::size() const { return storage()->size(); }
} // namespace ir
@@ -22,7 +22,7 @@ namespace ir {
///
/// \brief All built-in attributes.
///
#define GET_BUILT_IN_ATTRIBUTE_LIST ir::StrAttribute, ir::DictionaryAttribute
#define GET_BUILT_IN_ATTRIBUTE_LIST ir::StrAttribute
class StrAttribute : public ir::Attribute {
public:
@@ -39,55 +39,13 @@ class StrAttribute : public ir::Attribute {
uint32_t size() const;
};
class NamedAttribute {
public:
NamedAttribute(StrAttribute name, Attribute value);
StrAttribute name() const { return name_; }
Attribute value() const { return value_; }
void SetName(StrAttribute name) { name_ = name; }
void SetValue(Attribute value) { value_ = value; }
bool operator<(const NamedAttribute &right) const;
bool operator==(const NamedAttribute &right) const;
bool operator!=(const NamedAttribute &right) const;
friend struct std::hash<NamedAttribute>;
operator std::pair<const StrAttribute, Attribute>() const {
return std::make_pair(name_, value_);
}
private:
StrAttribute name_;
Attribute value_;
};
class DictionaryAttribute : public ir::Attribute {
public:
using Attribute::Attribute;
DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(DictionaryAttribute,
DictionaryAttributeStorage);
Attribute GetValue(const StrAttribute &name);
uint32_t size() const;
};
} // namespace ir
namespace std {
template <>
struct hash<ir::NamedAttribute> {
std::size_t operator()(const ir::NamedAttribute &obj) const {
return ir::hash_combine(std::hash<ir::Attribute>()(obj.name_),
std::hash<ir::Attribute>()(obj.value_));
struct hash<ir::StrAttribute> {
std::size_t operator()(const ir::StrAttribute &obj) const {
return std::hash<const ir::StrAttribute::Storage *>()(obj.storage());
}
};
} // namespace std
@@ -54,36 +54,4 @@ struct StrAttributeStorage : public ir::AttributeStorage {
uint32_t size_;
};
///
/// \brief Define parametric AttributeStorage for DictionaryAttribute.
///
class StrAttribute;
class NamedAttribute;
struct DictionaryAttributeStorage : public AttributeStorage {
using ParamKey = std::map<StrAttribute, Attribute>;
explicit DictionaryAttributeStorage(const ParamKey &key);
~DictionaryAttributeStorage() { free(data_); }
static DictionaryAttributeStorage *Construct(ParamKey key) {
return new DictionaryAttributeStorage(key);
}
static std::size_t HashValue(const ParamKey &key);
bool operator==(const ParamKey &key) const;
ParamKey GetAsKey() const;
Attribute GetValue(const StrAttribute &name) const;
NamedAttribute *data() const { return data_; }
uint32_t size() const { return size_; }
private:
NamedAttribute *data_;
uint32_t size_;
};
} // namespace ir
@@ -14,6 +14,7 @@
#include "paddle/ir/builtin_dialect.h"
#include "paddle/ir/builtin_attribute.h"
#include "paddle/ir/builtin_op.h"
#include "paddle/ir/builtin_type.h"
namespace ir {
@@ -26,6 +27,7 @@ void BuiltinDialect::initialize() {
// Register all built-in types defined in builtin_type.h.
RegisterTypes<GET_BUILT_IN_TYPE_LIST>();
RegisterAttributes<GET_BUILT_IN_ATTRIBUTE_LIST>();
RegisterOps<GET_BUILT_IN_OP_LIST>();
}
} // namespace ir
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/ir/builtin_op.h"
namespace ir {
const char *GetParameterOp::attributes_name_[] = {"parameter_name"};
const char *SetParameterOp::attributes_name_[] = {"parameter_name"};
} // namespace ir
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/op_base.h"
namespace ir {
///
/// \brief This macro is used to get a list of all built-in OPs in this file.
/// The built-in Dialect will use this macro to quickly register all built-in
/// OPs.
///
#define GET_BUILT_IN_OP_LIST ir::GetParameterOp, ir::SetParameterOp
///
/// \brief GetParameterOp: OpResult = GetParameterOp({StrAttribute,
/// StrAttribute})
///
class GetParameterOp : public ir::Op<GetParameterOp> {
public:
using Op::Op;
static const char* name() { return "GetParameterOp"; }
static uint32_t attributes_num() { return 1; }
static const char* attributes_name_[];
};
///
/// \brief SetParameterOp: SetParameterOp(OpOperand, {StrAttribute,
/// StrAttribute})
///
class SetParameterOp : public ir::Op<SetParameterOp> {
public:
using Op::Op;
static const char* name() { return "SetParameterOp"; }
static uint32_t attributes_num() { return 1; }
static const char* attributes_name_[];
};
} // namespace ir
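As a sketch, these ops are built through the generic Operation::create interface rather than per-op builders (assumes a registered BuiltinDialect and a result type dense_tensor_type; ir_program_test.cc below shows the full flow):

// Hypothetical construction of a GetParameterOp.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Dialect *builtin = ctx->GetOrRegisterDialect<ir::BuiltinDialect>();
std::string op_name =
    builtin->name() + "." + std::string(ir::GetParameterOp::name());
ir::OpInfoImpl *info = ctx->GetRegisteredOpInfo(op_name);
ir::AttributeMap attrs{{"parameter_name", ir::StrAttribute::get(ctx, "a")}};
ir::Operation *op =
    ir::Operation::create({}, {dense_tensor_dtype}, attrs, info);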
@@ -14,7 +14,6 @@
#pragma once
#include "paddle/ir/builtin_type_storage.h"
#include "paddle/ir/type.h"
namespace ir {
@@ -25,7 +24,7 @@ namespace ir {
///
#define GET_BUILT_IN_TYPE_LIST \
ir::Float16Type, ir::Float32Type, ir::Float64Type, ir::Int16Type, \
ir::Int32Type, ir::Int64Type, ir::DenseTensorType
ir::Int32Type, ir::Int64Type
///
/// \brief Define built-in parameterless types. Please add the necessary
@@ -97,24 +96,4 @@ class Int64Type : public ir::Type {
static Int64Type get(ir::IrContext *context);
};
///
/// \brief Define built-in parametric types.
///
class DenseTensorType : public ir::Type {
public:
using Type::Type;
DECLARE_TYPE_UTILITY_FUNCTOR(DenseTensorType, DenseTensorTypeStorage);
const ir::Type &dtype() const;
const ir::DenseTensorTypeStorage::Dim &dim() const;
const ir::DenseTensorTypeStorage::DataLayout &data_layout() const;
const ir::DenseTensorTypeStorage::LoD &lod() const;
const size_t &offset() const;
};
} // namespace ir
@@ -18,6 +18,8 @@ namespace ir {
Dialect::Dialect(std::string name, ir::IrContext *context, ir::TypeId id)
: name_(std::move(name)), context_(context), id_(id) {}
Dialect::~Dialect() = default;
void Dialect::RegisterType(ir::AbstractType &&abstract_type) {
ir::AbstractType *new_abstract_type =
new ir::AbstractType(std::move(abstract_type));
@@ -35,4 +37,18 @@ void Dialect::RegisterAttribute(ir::AbstractAttribute &&abstract_attribute) {
void Dialect::RegisterOp(const std::string &name, OpInfoImpl *op_info) {
this->ir_context()->RegisterOpInfo(name, op_info);
}
void Dialect::RegisterInterface(std::unique_ptr<DialectInterface> interface) {
VLOG(4) << "Register interface into dialect" << std::endl;
auto it = registered_interfaces_.emplace(interface->interface_id(),
std::move(interface));
(void)it;
}
DialectInterface::~DialectInterface() = default;
IrContext *DialectInterface::ir_context() const {
return dialect_->ir_context();
}
} // namespace ir
@@ -15,11 +15,13 @@
#pragma once
#include "paddle/ir/attribute_base.h"
#include "paddle/ir/dialect_interface.h"
#include "paddle/ir/ir_context.h"
#include "paddle/ir/op_info_impl.h"
#include "paddle/ir/type_base.h"
namespace ir {
class DialectInterface;
///
/// \brief Dialect can basically be understood as a namespace. In Dialect, we
/// can define a series of types, attributes, operations, etc. An instance of
@@ -31,6 +33,8 @@ class Dialect {
public:
Dialect(std::string name, ir::IrContext *context, ir::TypeId id);
virtual ~Dialect();
const std::string &name() const { return name_; }
ir::IrContext *ir_context() const { return context_; }
@@ -49,8 +53,6 @@
template <typename T>
void RegisterType() {
VLOG(4) << "Type registered into Dialect. --->";
// if (this->ir_context()->registed_abstract_type().count(
// ir::TypeId::get<T>()) == 0) {
if (this->ir_context()->GetRegisteredAbstractType(ir::TypeId::get<T>()) ==
nullptr) {
ir::AbstractType *abstract_type =
@@ -118,11 +120,50 @@
void RegisterOp(const std::string &name, OpInfoImpl *op_info);
///
/// \brief Register interface methods.
///
DialectInterface *GetRegisteredInterface(TypeId id) {
auto it = registered_interfaces_.find(id);
return it != registered_interfaces_.end() ? it->second.get() : nullptr;
}
template <typename InterfaceT>
InterfaceT *GetRegisteredInterface() {
return static_cast<InterfaceT *>(
GetRegisteredInterface(TypeId::get<InterfaceT>()));
}
/// Register a dialect interface with this dialect instance.
void RegisterInterface(std::unique_ptr<DialectInterface> interface);
/// Register a set of dialect interfaces with this dialect instance.
template <typename... Args>
void RegisterInterfaces() {
(void)std::initializer_list<int>{
0, (RegisterInterface(std::make_unique<Args>(this)), 0)...};
}
template <typename InterfaceT, typename... Args>
InterfaceT &RegisterInterface(Args &&...args) {
InterfaceT *interface = new InterfaceT(this, std::forward<Args>(args)...);
RegisterInterface(std::unique_ptr<DialectInterface>(interface));
return *interface;
}
private:
Dialect(const Dialect &) = delete;
Dialect &operator=(Dialect &) = delete;
std::string name_;
ir::IrContext *context_; // not owned
ir::TypeId id_;
std::unordered_map<TypeId, std::unique_ptr<DialectInterface>>
registered_interfaces_;
};
} // namespace ir
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/type_id.h"
namespace ir {
class Dialect;
class IrContext;
///
/// \brief DialectInterface
///
template <typename ConcreteType, typename BaseT>
class DialectInterfaceBase : public BaseT {
public:
using Base = DialectInterfaceBase<ConcreteType, BaseT>;
/// Get a unique id for the derived interface type.
static TypeId id() { return TypeId::get<ConcreteType>(); }
protected:
explicit DialectInterfaceBase(Dialect *dialect) : BaseT(dialect, id()) {}
};
class DialectInterface {
public:
virtual ~DialectInterface();
/// The base class used for all derived interface types. This class provides
/// utilities necessary for registration.
template <typename ConcreteType>
using Base = DialectInterfaceBase<ConcreteType, DialectInterface>;
/// Return the dialect that this interface represents.
Dialect *dialect() const { return dialect_; }
/// Return the context that holds the parent dialect of this interface.
IrContext *ir_context() const;
/// Return the derived interface id.
TypeId interface_id() const { return interface_id_; }
protected:
DialectInterface(Dialect *dialect, TypeId id)
: dialect_(dialect), interface_id_(id) {}
private:
/// The dialect that represents this interface.
Dialect *dialect_;
/// The unique identifier for the derived interface type.
TypeId interface_id_;
};
} // namespace ir
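A minimal sketch of defining and registering a custom interface with these pieces (MyInterface and MyDialect are hypothetical names; ParameterConvertInterface above is a real instance):

// Hypothetical interface: derive from DialectInterface::Base<ConcreteType>.
class MyInterface : public ir::DialectInterface::Base<MyInterface> {
 public:
  explicit MyInterface(ir::Dialect *dialect) : Base(dialect) {}
};
// A dialect registers the interface during initialization ...
class MyDialect : public ir::Dialect {
 public:
  explicit MyDialect(ir::IrContext *ctx)
      : ir::Dialect(name(), ctx, ir::TypeId::get<MyDialect>()) {
    RegisterInterfaces<MyInterface>();
  }
  static const char *name() { return "my"; }
};
// ... and clients later look it up by type:
// dialect->GetRegisteredInterface<MyInterface>() returns the registered
// instance, or nullptr if none was registered.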
@@ -236,6 +236,12 @@ class OpInfoImpl {
ir::Dialect *dialect() const { return dialect_; }
uint32_t AttributeNum() const { return num_attributes_; }
const char *GetAttributeByIndex(size_t idx) const {
return idx < num_attributes_ ? p_attributes_[idx] : nullptr;
}
private:
OpInfoImpl(uint32_t num_interfaces,
uint32_t num_traits,
......
@@ -13,6 +13,8 @@
// limitations under the License.
#include "paddle/ir/operation.h"
#include "paddle/ir/dialect.h"
#include "paddle/ir/program.h"
#include "paddle/ir/utils.h"
namespace ir {
@@ -21,7 +23,7 @@ namespace ir {
// OpInlineResult, Operation, Operand.
Operation *Operation::create(const std::vector<ir::OpResult> &inputs,
const std::vector<ir::Type> &output_types,
ir::DictionaryAttribute attribute,
const AttributeMap &attribute,
ir::OpInfo op_info) {
// 1. Calculate the required memory size for OpResults + Operation +
// OpOperands.
@@ -63,7 +65,7 @@ Operation *Operation::create(const std::vector<ir::OpResult> &inputs,
new (base_ptr) detail::OpOperandImpl(inputs[idx].impl_, op);
base_ptr += sizeof(detail::OpOperandImpl);
}
VLOG(4) << "Construct an Operation: " << op->print();
return op;
}
@@ -83,9 +85,18 @@ void Operation::destroy() {
// 2.1. Deconstruct OpResult.
char *base_ptr = aligned_ptr;
for (size_t idx = num_results_; idx > 0; idx--) {
if (!reinterpret_cast<detail::OpResultImpl *>(base_ptr)->use_empty()) {
throw("Cannot destroy a value that still has uses!");
// release the uses of this result
detail::OpOperandImpl *first_use =
reinterpret_cast<detail::OpResultImpl *>(base_ptr)->first_use();
if (first_use != nullptr) {
first_use->release_source();
detail::OpOperandImpl *next_use = first_use->next_use();
while (next_use != nullptr) {
next_use->release_source();
next_use = next_use->next_use();
}
}
// destroy the result
if (idx > max_inline_result_num) {
reinterpret_cast<detail::OpOutlineResultImpl *>(base_ptr)
->~OpOutlineResultImpl();
@@ -117,11 +128,8 @@ void Operation::destroy() {
Operation::Operation(uint32_t num_results,
uint32_t num_operands,
ir::DictionaryAttribute attribute,
const AttributeMap &attribute,
ir::OpInfo op_info) {
if (!attribute) {
throw("unexpected null attribute dictionary");
}
num_results_ = num_results;
num_operands_ = num_operands;
attribute_ = attribute;
@@ -153,6 +161,15 @@ ir::OpResult Operation::GetResultByIndex(uint32_t index) {
}
}
ir::OpOperand Operation::GetOperandByIndex(uint32_t index) {
if (index >= num_operands_) {
throw("index exceeds OP input range.");
}
char *ptr = reinterpret_cast<char *>(this) + sizeof(Operation) +
(index) * sizeof(detail::OpOperandImpl);
return ir::OpOperand(reinterpret_cast<detail::OpOperandImpl *>(ptr));
}
std::string Operation::print() {
std::stringstream result;
result << "{ " << num_results_ << " outputs, " << num_operands_
@@ -173,4 +190,9 @@ std::string Operation::print() {
return result.str();
}
std::string Operation::op_name() const {
return op_info_.impl()->dialect()->name() + "." +
std::string(op_info_.impl()->name());
}
} // namespace ir
@@ -24,25 +24,35 @@ template <class ConcreteTrait>
class OpTraitBase;
template <typename ConcreteInterface>
class OpInterfaceBase;
class Program;
using AttributeMap = std::unordered_map<std::string, Attribute>;
class alignas(8) Operation final {
public:
///
/// \brief Allocate memory and construct objects in the following order:
/// OpResultImpls|Operation|OpOperandImpls.
/// NOTE: Similar to new and delete, create() and destroy() must be used in
/// pairs.
///
static Operation *create(const std::vector<ir::OpResult> &inputs,
const std::vector<ir::Type> &output_types,
ir::DictionaryAttribute attribute,
const AttributeMap &attribute,
ir::OpInfo op_info);
///
/// \brief Destroy the operation object and free the memory allocated by
/// create().
///
void destroy();
ir::OpResult GetResultByIndex(uint32_t index);
ir::OpOperand GetOperandByIndex(uint32_t index);
std::string print();
ir::DictionaryAttribute attribute() const { return attribute_; }
const AttributeMap &attribute() const { return attribute_; }
ir::OpInfo op_info() const { return op_info_; }
@@ -50,6 +60,8 @@ class alignas(8) Operation final {
uint32_t num_operands() const { return num_operands_; }
std::string op_name() const;
template <typename T>
T dyn_cast() const {
return CastUtil<T>::call(this);
@@ -65,10 +77,16 @@
return op_info_.HasInterface<Interface>();
}
Program *parent_program() const { return parent_program_; }
void set_parent_program(Program *parent_program) {
parent_program_ = parent_program;
}
private:
Operation(uint32_t num_results,
uint32_t num_operands,
ir::DictionaryAttribute attribute,
const AttributeMap &attribute,
ir::OpInfo op_info);
template <typename T, typename Enabler = void>
@@ -92,13 +110,15 @@
}
};
ir::DictionaryAttribute attribute_;
AttributeMap attribute_;
ir::OpInfo op_info_;
uint32_t num_results_ = 0;
uint32_t num_operands_ = 0;
ir::Program *parent_program_{nullptr};
};
} // namespace ir
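A sketch of the ownership contract stated above (illustrative; inputs, output_types, attrs, and info are assumed to already be in scope):

// create() and destroy() pair like new and delete.
ir::Operation *op = ir::Operation::create(inputs, output_types, attrs, info);
// ... use op ...
op->destroy();  // mandatory, unless a Program takes ownership via InsertOp.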
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/type.h"
namespace ir {
///
/// \brief Parameter represents a weight in the computation graph.
///
class Parameter {
public:
Parameter(void* data, size_t size, ir::Type type) {
data_ = malloc(size);
memcpy(data_, data, size);
size_ = size;
type_ = type;
}
Parameter(const Parameter& param) {
data_ = malloc(param.size_);
memcpy(data_, param.data_, param.size_);
size_ = param.size_;
type_ = param.type_;
}
Parameter& operator=(const Parameter& param) {
  if (this == &param) return *this;
  free(data_);
  data_ = malloc(param.size_);
  memcpy(data_, param.data_, param.size_);
  size_ = param.size_;
  type_ = param.type_;
  return *this;
}
~Parameter() { free(data_); }
Type type() const { return type_; }
void* data() const { return data_; }
bool is_mutable() const { return is_mutable_; }
void set_mutable() { is_mutable_ = true; }
private:
void* data_;
///
/// \brief Number of bytes held in data_.
///
size_t size_;
Type type_;
bool is_mutable_ = false;
};
} // namespace ir
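For example, a Parameter deep-copies the buffer passed to it, so the caller's buffer may be released afterwards (sketch; dense_tensor_dtype is assumed to be built as in ir_program_test.cc):

std::vector<float> host = {1.f, 2.f, 3.f, 4.f};
ir::Parameter param(
    host.data(), host.size() * sizeof(float), dense_tensor_dtype);
// param owns its own copy of the bytes; host may now be destroyed.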
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/ir/program.h"
#include "paddle/ir/ir_context.h"
namespace ir {
Program::~Program() {
for (auto op : ops_) {
op->destroy();
}
}
void Program::InsertOp(Operation* op) {
ops_.push_back(op);
op->set_parent_program(this);
}
Parameter* Program::GetParameter(std::string name) const {
if (parameters_.count(name) != 0) {
return parameters_.at(name).get();
}
return nullptr;
}
void Program::SetParameter(std::string name,
std::unique_ptr<Parameter>&& parameter) {
parameters_[name].reset(parameter.release());
}
} // namespace ir
@@ -12,66 +12,48 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/ir/builtin_attribute_storage.h"
#include "paddle/ir/builtin_attribute.h"
#include "paddle/ir/utils.h"
namespace ir {
DictionaryAttributeStorage::DictionaryAttributeStorage(const ParamKey &key) {
size_ = key.size();
data_ = reinterpret_cast<NamedAttribute *>(
malloc(sizeof(NamedAttribute) * size_));
uint32_t idx = 0;
for (auto iter = key.begin(); iter != key.end(); ++iter) {
data_[idx].SetName(iter->first);
data_[idx].SetValue(iter->second);
idx++;
}
}
#pragma once
std::size_t DictionaryAttributeStorage::HashValue(const ParamKey &key) {
std::size_t hash_value = key.size();
for (auto iter = key.begin(); iter != key.end(); ++iter) {
hash_value = ir::hash_combine(
hash_value,
std::hash<NamedAttribute>()(NamedAttribute(iter->first, iter->second)));
}
return hash_value;
}
#include <list>
#include <unordered_map>
bool DictionaryAttributeStorage::operator==(const ParamKey &key) const {
uint32_t size = key.size();
if (size_ != size) return false;
uint32_t idx = 0;
for (auto iter = key.begin(); iter != key.end(); ++iter) {
if (data_[idx] != NamedAttribute(iter->first, iter->second)) {
return false;
}
idx++;
}
return true;
}
DictionaryAttributeStorage::ParamKey DictionaryAttributeStorage::GetAsKey()
const {
return ParamKey(data_, data_ + size_);
}
#include "paddle/ir/builtin_attribute.h"
#include "paddle/ir/operation.h"
#include "paddle/ir/parameter.h"
Attribute DictionaryAttributeStorage::GetValue(const StrAttribute &name) const {
size_t left = 0;
size_t right = size_;
while (left < right) {
size_t mid = left + (right - left) / 2;
if (data_[mid].name() == name) {
return data_[mid].value();
} else if (data_[mid].name() < name) {
left = mid + 1;
} else {
right = mid;
}
}
return nullptr;
}
namespace ir {
///
/// \brief Program is an abstraction of model structure, divided into
/// computational graphs and weights. At the current stage, a computational
/// graph is represented in the form of a list<Operation *>. Todo: In the
/// future, detailed design of control flow operators will be carried out, and
/// concepts such as basic blocks, closures, and functions will be introduced to
/// continuously improve Program's ability to represent computational graphs.
///
class Program {
public:
~Program();
std::list<Operation*> ops() const { return ops_; }
size_t parameters_num() const { return parameters_.size(); }
///
/// \brief Insert an Operation* constructed by Operation::create(...) into
/// this Program. NOTE: Ownership of the Operation* is transferred to this
/// Program; the user does not need to call Operation::destroy() manually.
///
void InsertOp(Operation* op);
Parameter* GetParameter(std::string name) const;
void SetParameter(std::string name, std::unique_ptr<Parameter>&& parameter);
private:
std::list<Operation*> ops_; // owned
std::unordered_map<std::string, std::unique_ptr<Parameter>> parameters_;
};
} // namespace ir
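A minimal sketch of the Program API above (illustrative; parameter_w and op are assumed to be created as shown elsewhere in this commit):

ir::Program program;
program.SetParameter("w", std::move(parameter_w));  // weight storage
program.InsertOp(op);  // program now owns op and will call destroy() on it
assert(program.parameters_num() == 1 && program.ops().size() == 1);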
@@ -57,7 +57,7 @@ class Type {
const Storage *storage() const { return storage_; }
const Dialect &dialect() const { return storage_->abstract_type().dialect(); }
Dialect &dialect() const { return storage_->abstract_type().dialect(); }
IrContext *ir_context() const;
......
@@ -62,7 +62,7 @@ class AbstractType {
///
/// \return The dialect this type was registered to.
///
const Dialect &dialect() const { return dialect_; }
Dialect &dialect() const { return const_cast<Dialect &>(dialect_); }
///
/// \brief Find the AbstractType instance whose TypeId is type_id from
......
@@ -106,6 +106,10 @@ ir::Operation *OpOperandImpl::owner() const { return owner_; }
ir::detail::OpOperandImpl *OpOperandImpl::next_use() { return next_use_; }
ir::Value OpOperandImpl::source() const { return source_; }
void OpOperandImpl::release_source() { source_ = nullptr; }
OpOperandImpl::OpOperandImpl(ir::Value source, ir::Operation *owner)
: source_(source), owner_(owner) {
prev_use_addr_ = source.impl()->first_use_addr();
......
@@ -31,6 +31,10 @@ class OpOperandImpl {
ir::detail::OpOperandImpl *next_use();
ir::Value source() const;
void release_source();
/// Remove this operand from the current use list.
void remove_from_ud_chain();
......
@@ -3,4 +3,13 @@ if(WITH_NEWIR)
cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS new_ir gtest)
cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS new_ir gtest)
cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS new_ir gtest)
cc_test_old(
ir_program_test
SRCS
ir_program_test.cc
DEPS
new_ir
pd_dialect
phi
gtest)
endif()
@@ -58,34 +58,3 @@ TEST(attribute_test, built_in_attribute) {
EXPECT_EQ(string_attr_cast_1.isa<ir::StrAttribute>(), true);
EXPECT_EQ(string_attr_cast_1.size() == 8, 1);
}
TEST(attribute_test, dictionary_attribute) {
ir::IrContext *ctx = ir::IrContext::Instance();
std::string str_attr1_name = "attr1_name";
std::string str_attr1_value = "attr1_value";
ir::StrAttribute attr1_name = ir::StrAttribute::get(ctx, str_attr1_name);
ir::Attribute attr1_value = ir::StrAttribute::get(ctx, str_attr1_value);
std::string str_attr2_name = "attr2_name";
std::string str_attr2_value = "attr2_value";
ir::StrAttribute attr2_name = ir::StrAttribute::get(ctx, str_attr2_name);
ir::Attribute attr2_value = ir::StrAttribute::get(ctx, str_attr2_value);
std::map<ir::StrAttribute, ir::Attribute> named_attr1;
named_attr1.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr1_name, attr1_value));
named_attr1.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr2_name, attr2_value));
ir::DictionaryAttribute dic_attr1 =
ir::DictionaryAttribute::get(ctx, named_attr1);
std::map<ir::StrAttribute, ir::Attribute> named_attr2;
named_attr2.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr2_name, attr2_value));
named_attr2.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr1_name, attr1_value));
ir::DictionaryAttribute dic_attr2 =
ir::DictionaryAttribute::get(ctx, named_attr2);
EXPECT_EQ(dic_attr1, dic_attr2);
EXPECT_EQ(attr1_value, dic_attr1.GetValue(attr1_name));
EXPECT_EQ(attr2_value, dic_attr1.GetValue(attr2_name));
}
@@ -99,15 +99,14 @@ class TestDialect : public ir::Dialect {
void initialize() { RegisterOps<Operation1, Operation2>(); }
};
ir::DictionaryAttribute CreateAttribute(std::string attribute_name,
std::string attribute) {
ir::AttributeMap CreateAttributeMap(std::string attribute_name,
std::string attribute) {
ir::IrContext *ctx = ir::IrContext::Instance();
ir::StrAttribute attr_name = ir::StrAttribute::get(ctx, attribute_name);
ir::Attribute attr_value = ir::StrAttribute::get(ctx, attribute);
std::map<ir::StrAttribute, ir::Attribute> named_attr;
named_attr.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr_name, attr_value));
return ir::DictionaryAttribute::get(ctx, named_attr);
ir::AttributeMap attr_map;
attr_map.insert(
std::pair<std::string, ir::Attribute>(attribute_name, attr_value));
return attr_map;
}
TEST(op_test, op_test) {
@@ -137,7 +136,7 @@ TEST(op_test, op_test) {
ir::Operation *op =
ir::Operation::create(op_inputs,
op_output_types,
CreateAttribute("op1_name", "op1_attr"),
CreateAttributeMap("op1_name", "op1_attr"),
op2_info);
if (op->HasTrait<ReadOnlyTrait>()) {
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "paddle/fluid/dialect/pd_dialect.h"
#include "paddle/fluid/dialect/pd_type.h"
#include "paddle/fluid/dialect/utils.h"
#include "paddle/ir/builtin_attribute.h"
#include "paddle/ir/builtin_dialect.h"
#include "paddle/ir/builtin_op.h"
#include "paddle/ir/builtin_type.h"
#include "paddle/ir/ir_context.h"
#include "paddle/ir/program.h"
#include "paddle/ir/utils.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
class AddOp : public ir::Op<AddOp> {
public:
using Op::Op;
static const char *name() { return "Add"; }
static const char **attributes_name_;
static uint32_t attributes_num() { return 0; }
};
const char **AddOp::attributes_name_ = nullptr;
TEST(program_test, program) {
// (1) Init environment.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Dialect *builtin_dialect =
ctx->GetOrRegisterDialect<ir::BuiltinDialect>();
builtin_dialect->RegisterOp<AddOp>();
ir::Dialect *paddle_dialect =
ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
// (2) Create an empty program object
ir::Program program;
// ir::Program *program = new ir::Program();
EXPECT_EQ(program.ops().size() == 0, true);
// (3) Create a float32 DenseTensor Parameter and save into Program
ir::Type fp32_dtype = ir::Float32Type::get(ctx);
paddle::dialect::DenseTensorTypeStorage::Dim dims = {2, 2};
paddle::dialect::DenseTensorTypeStorage::DataLayout data_layout =
paddle::dialect::DenseTensorTypeStorage::DataLayout::NCHW;
paddle::dialect::DenseTensorTypeStorage::LoD lod = {{0, 1, 2}};
size_t offset = 0;
ir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get(
ctx, fp32_dtype, dims, data_layout, lod, offset);
std::vector<float> data_a = {1, 2, 3, 4};
std::unique_ptr<ir::Parameter> parameter_a =
std::make_unique<ir::Parameter>(reinterpret_cast<void *>(data_a.data()),
4 * sizeof(float),
dense_tensor_dtype);
program.SetParameter("a", std::move(parameter_a));
EXPECT_EQ(program.parameters_num() == 1, true);
std::vector<float> data_b = {5, 6, 7, 8};
std::unique_ptr<ir::Parameter> parameter_b =
std::make_unique<ir::Parameter>(reinterpret_cast<void *>(data_b.data()),
4 * sizeof(float),
dense_tensor_dtype);
program.SetParameter("b", std::move(parameter_b));
EXPECT_EQ(program.parameters_num() == 2, true);
// (4) Def a = GetParameterOp("a"), and create DenseTensor for a.
std::string op1_name =
builtin_dialect->name() + "." + std::string(ir::GetParameterOp::name());
ir::OpInfoImpl *op1_info = ctx->GetRegisteredOpInfo(op1_name);
std::unordered_map<std::string, ir::Attribute> op1_attribute{
{"parameter_name", ir::StrAttribute::get(ctx, "a")}};
ir::Operation *op1 =
ir::Operation::create({}, {dense_tensor_dtype}, op1_attribute, op1_info);
program.InsertOp(op1);
EXPECT_EQ(op1->GetResultByIndex(0).type().dialect().id(),
paddle_dialect->id());
using Interface = paddle::dialect::ParameterConvertInterface;
Interface *a_interface = op1->GetResultByIndex(0)
.type()
.dialect()
.GetRegisteredInterface<Interface>();
std::shared_ptr<paddle::framework::Variable> a_var =
a_interface->ParameterToVariable(program.GetParameter("a"));
const phi::DenseTensor &a_tensor = a_var->Get<phi::DenseTensor>();
EXPECT_EQ(a_tensor.numel(), 4);
EXPECT_EQ(a_tensor.dims(), phi::DDim(dims.data(), dims.size()));
EXPECT_EQ(a_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
EXPECT_EQ(a_tensor.layout(),
paddle::dialect::TransToPhiDataLayout(data_layout));
EXPECT_EQ(a_tensor.lod(), lod);
EXPECT_EQ(a_tensor.offset(), offset);
for (int64_t i = 0; i < a_tensor.numel(); i++) {
EXPECT_EQ(*(a_tensor.data<float>() + i), data_a[i]);
}
// (5) Def b = GetParameterOp("b"), and create DenseTensor for b.
std::string op2_name =
builtin_dialect->name() + "." + std::string(ir::GetParameterOp::name());
ir::OpInfoImpl *op2_info = ctx->GetRegisteredOpInfo(op2_name);
std::unordered_map<std::string, ir::Attribute> op2_attribute{
{"parameter_name", ir::StrAttribute::get(ctx, "b")}};
ir::Operation *op2 =
ir::Operation::create({}, {dense_tensor_dtype}, op2_attribute, op2_info);
program.InsertOp(op2);
EXPECT_EQ(op2->GetResultByIndex(0).type().dialect().id(),
paddle_dialect->id());
Interface *b_interface = op2->GetResultByIndex(0)
.type()
.dialect()
.GetRegisteredInterface<Interface>();
std::shared_ptr<paddle::framework::Variable> b_var =
b_interface->ParameterToVariable(program.GetParameter("b"));
const phi::DenseTensor &b_tensor = b_var->Get<phi::DenseTensor>();
EXPECT_EQ(b_tensor.numel(), 4);
EXPECT_EQ(b_tensor.dims(), phi::DDim(dims.data(), dims.size()));
EXPECT_EQ(b_tensor.dtype(), paddle::dialect::TransToPhiDataType(fp32_dtype));
EXPECT_EQ(b_tensor.layout(),
paddle::dialect::TransToPhiDataLayout(data_layout));
EXPECT_EQ(b_tensor.lod(), lod);
EXPECT_EQ(b_tensor.offset(), offset);
for (int64_t i = 0; i < b_tensor.numel(); i++) {
EXPECT_EQ(*(b_tensor.data<float>() + i), data_b[i]);
}
// (6) Def c = AddOp(a, b), execute this op.
std::string op3_name =
builtin_dialect->name() + "." + std::string(AddOp::name());
ir::OpInfoImpl *op3_info = ctx->GetRegisteredOpInfo(op3_name);
std::unordered_map<std::string, ir::Attribute> op3_attribute;
ir::Operation *op3 = ir::Operation::create(
{op1->GetResultByIndex(0), op2->GetResultByIndex(0)},
{dense_tensor_dtype},
op3_attribute,
op3_info);
program.InsertOp(op3);
phi::CPUContext *dev_ctx = static_cast<phi::CPUContext *>(
paddle::platform::DeviceContextPool::Instance().Get(
paddle::platform::CPUPlace()));
phi::DenseTensor c_tensor =
phi::Add<float, phi::CPUContext>(*dev_ctx, a_tensor, b_tensor);
std::shared_ptr<paddle::framework::Variable> variable_c =
std::make_shared<paddle::framework::Variable>();
auto *dst_tensor = variable_c->GetMutable<phi::DenseTensor>();
*dst_tensor = c_tensor;
EXPECT_EQ(dst_tensor->numel(), b_tensor.numel());
EXPECT_EQ(dst_tensor->dims(), b_tensor.dims());
EXPECT_EQ(dst_tensor->dtype(), b_tensor.dtype());
EXPECT_EQ(dst_tensor->layout(), b_tensor.layout());
EXPECT_EQ(dst_tensor->lod(), b_tensor.lod());
EXPECT_EQ(dst_tensor->offset(), b_tensor.offset());
for (int64_t i = 0; i < dst_tensor->numel(); i++) {
EXPECT_EQ(*(dst_tensor->data<float>() + i), data_a[i] + data_b[i]);
}
// (7) Def SetParameterOp(c, "c")
std::string op4_name =
builtin_dialect->name() + "." + std::string(ir::SetParameterOp::name());
ir::OpInfoImpl *op4_info = ctx->GetRegisteredOpInfo(op4_name);
std::unordered_map<std::string, ir::Attribute> op4_attribute{
{"parameter_name", ir::StrAttribute::get(ctx, "c")}};
ir::Operation *op4 = ir::Operation::create(
{op3->GetResultByIndex(0)}, {}, op4_attribute, op4_info);
program.InsertOp(op4);
EXPECT_EQ(op4->GetOperandByIndex(0).impl()->source().type().dialect().id(),
paddle_dialect->id());
Interface *c_interface = op4->GetOperandByIndex(0)
.impl()
->source()
.type()
.dialect()
.GetRegisteredInterface<Interface>();
// ir::Parameter *parameter_c =
// c_interface->VariableToParameter(variable_c.get());
std::unique_ptr<ir::Parameter> parameter_c =
c_interface->VariableToParameter(variable_c.get());
EXPECT_EQ(parameter_c->type(), dense_tensor_dtype);
for (int64_t i = 0; i < dst_tensor->numel(); i++) {
EXPECT_EQ(*(dst_tensor->data<float>() + i),
*(static_cast<float *>(parameter_c->data()) + i));
}
program.SetParameter("c", std::move(parameter_c));
// (8) Traverse Program
std::list<ir::Operation *> ops = program.ops();
EXPECT_EQ(ops.size() == 4, true);
EXPECT_EQ(program.parameters_num() == 3, true);
}
@@ -23,16 +23,14 @@
// This unittest is used to test the construction interfaces of value class and
// operation. The constructed test scenario is: a = OP1(); b = OP2(); c = OP3(a,
// b); d, e, f, g, h, i, j = OP4(a, c);
ir::DictionaryAttribute CreateAttribute(std::string attribute_name,
std::string attribute) {
ir::AttributeMap CreateAttributeMap(std::string attribute_name,
std::string attribute) {
ir::IrContext *ctx = ir::IrContext::Instance();
ir::StrAttribute attr_name = ir::StrAttribute::get(ctx, attribute_name);
ir::Attribute attr_value = ir::StrAttribute::get(ctx, attribute);
std::map<ir::StrAttribute, ir::Attribute> named_attr;
named_attr.insert(
std::pair<ir::StrAttribute, ir::Attribute>(attr_name, attr_value));
return ir::DictionaryAttribute::get(ctx, named_attr);
ir::AttributeMap attr_map;
attr_map.insert(
std::pair<std::string, ir::Attribute>(attribute_name, attr_value));
return attr_map;
}
TEST(value_test, value_test) {
@@ -43,7 +41,7 @@ TEST(value_test, value_test) {
ir::Operation *op1 =
ir::Operation::create(op1_inputs,
op1_output_types,
CreateAttribute("op1_name", "op1_attr"),
CreateAttributeMap("op1_name", "op1_attr"),
nullptr);
std::cout << op1->print() << std::endl;
// 2. Construct OP2: b = OP2();
@@ -52,7 +50,7 @@ TEST(value_test, value_test) {
ir::Operation *op2 =
ir::Operation::create(op2_inputs,
op2_output_types,
CreateAttribute("op2_name", "op2_attr"),
CreateAttributeMap("op2_name", "op2_attr"),
nullptr);
std::cout << op2->print() << std::endl;
// 3. Construct OP3: c = OP3(a, b);
@@ -62,7 +60,7 @@ TEST(value_test, value_test) {
ir::Operation *op3 =
ir::Operation::create(op3_inputs,
op3_output_types,
CreateAttribute("op3_name", "op3_attr"),
CreateAttributeMap("op3_name", "op3_attr"),
nullptr);
std::cout << op3->print() << std::endl;
// 4. Construct OP4: d, e, f, g, h, i, j = OP4(a, c);
@@ -75,7 +73,7 @@ TEST(value_test, value_test) {
ir::Operation *op4 =
ir::Operation::create(op4_inputs,
op4_output_types,
CreateAttribute("op4_name", "op4_attr"),
CreateAttributeMap("op4_name", "op4_attr"),
nullptr);
std::cout << op4->print() << std::endl;
......
@@ -124,46 +124,10 @@ TEST(type_test, built_in_type) {
&ir::AbstractType::lookup(int64_1.type_id(), ctx));
EXPECT_EQ(ir::Int64Type::classof(int64_1), 1);
// Test 2: Test the parametric built-in type of IrContext.
ir::DenseTensorTypeStorage::Dim dims = {1, 2, 3};
ir::DenseTensorTypeStorage::DataLayout data_layout =
ir::DenseTensorTypeStorage::DataLayout::NCHW;
ir::DenseTensorTypeStorage::LoD lod = {{1, 2, 3}, {4, 5, 6}};
size_t offset = 0;
ir::Type dense_tensor_1 =
ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, offset);
ir::Type dense_tensor_2 =
ir::DenseTensorType::get(ctx, fp32_2, dims, data_layout, lod, offset);
ir::Type dense_tensor_3 =
ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, 2);
EXPECT_EQ(dense_tensor_1, dense_tensor_2);
EXPECT_NE(dense_tensor_1, dense_tensor_3);
EXPECT_EQ(dense_tensor_1.type_id(), dense_tensor_2.type_id());
EXPECT_EQ(ir::DenseTensorType::classof(dense_tensor_1), 1);
ir::DenseTensorType dense_tensor_4 =
ir::DenseTensorType::get(ctx, fp32_1, dims, data_layout, lod, 2);
EXPECT_EQ(dense_tensor_4.offset() == 2, 1);
EXPECT_EQ(dense_tensor_4.dtype().isa<ir::Float32Type>(), true);
EXPECT_EQ(dense_tensor_4.data_layout(), data_layout);
// Test 3: Test isa and dyn_cast.
// Test 2: Test isa and dyn_cast.
EXPECT_EQ(fp16_1.isa<ir::Float16Type>(), true);
EXPECT_EQ(fp16_1.isa<ir::Float32Type>(), false);
EXPECT_EQ(fp16_1.isa<ir::DenseTensorType>(), false);
EXPECT_EQ(fp16_1.isa<ir::Type>(), true);
EXPECT_EQ(dense_tensor_1.isa<ir::DenseTensorType>(), true);
ir::DenseTensorType dense_tensor_cast_1 =
dense_tensor_1.dyn_cast<ir::DenseTensorType>();
EXPECT_EQ(dense_tensor_cast_1.isa<ir::DenseTensorType>(), true);
EXPECT_EQ(dense_tensor_cast_1.offset() == 0, 1);
const ir::DenseTensorType dense_tensor_cast_2 =
ir::dyn_cast<ir::DenseTensorType>(dense_tensor_1);
EXPECT_EQ(dense_tensor_cast_2.isa<ir::DenseTensorType>(), true);
EXPECT_EQ(dense_tensor_cast_2.offset() == 0, 1);
}
// Customize a parameterized TypeStorage IntegerTypeStorage.
......