diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.cc b/paddle/fluid/ir/dialect/pd_kernel_dialect.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a18ba986d57ecc248959f02e1d3ed014636d8fb7
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_dialect.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
+#include "paddle/fluid/ir/dialect/pd_attribute.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
+// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
+// paddle/fluid/ir/dialect/CMakeLists.txt.
+#include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/framework/data_type.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
+#include "paddle/fluid/ir/dialect/pd_op.h"
+#include "paddle/fluid/ir/dialect/utils.h"
+#include "paddle/ir/core/dialect_interface.h"
+#include "paddle/phi/core/dense_tensor.h"
+
+namespace paddle {
+namespace dialect {
+
+PaddleKernelDialect::PaddleKernelDialect(ir::IrContext *context)
+    : ir::Dialect(name(), context, ir::TypeId::get<PaddleKernelDialect>()) {
+  initialize();
+}
+
+void PaddleKernelDialect::initialize() {
+  RegisterTypes<paddle::dialect::AllocatedDenseTensorType>();
+  RegisterOps<paddle::dialect::PhiKernelOp>();
+
+  // RegisterAttributes();
+}
+
+void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) {
+  AllocatedDenseTensorType tensor_type =
+      type.dyn_cast<AllocatedDenseTensorType>();
+
+  os << phi::AllocationTypeStr(tensor_type.place().GetType()) << "_";
+  os << "tensor<";
+  for (auto d : phi::vectorize(tensor_type.dims())) {
+    os << d;
+    os << "x";
+  }
+  tensor_type.dtype().Print(os);
+  os << ">";
+}
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_dialect.h b/paddle/fluid/ir/dialect/pd_kernel_dialect.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3e4e329be89a4de4eb6cc326421d1e2b46b365f
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_dialect.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/framework/variable.h"
+#include "paddle/ir/core/dialect.h"
+#include "paddle/ir/core/parameter.h"
+
+namespace paddle {
+namespace dialect {
+
+class PaddleKernelDialect : public ir::Dialect {
+ public:
+  explicit PaddleKernelDialect(ir::IrContext* context);
+
+  static const char* name() { return "pd_kernel"; }
+
+  void PrintType(ir::Type type, std::ostream& os);
+
+ private:
+  void initialize();
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.cc b/paddle/fluid/ir/dialect/pd_kernel_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..0a04284642f4384d358c1a00d208b0fe0a25cba7
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_op.cc
@@ -0,0 +1,35 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
+
+namespace paddle {
+namespace dialect {
+
+const char *PhiKernelOp::attributes_name[attributes_num] = {
+    "base_op", "infermeta_fn", "kernel_fn"};
+
+void PhiKernelOp::Verify(const std::vector<ir::OpResult> &inputs,
+                         const std::vector<ir::Type> &outputs,
+                         const ir::AttributeMap &attributes) {
+  VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";
+  // Verify inputs type:
+
+  // Verify if attributes contain attribute name in attributes_name:
+  // if (!attributes.at("parameter_name").isa<ir::StrAttribute>()) {
+  //   throw("Type of attribute: parameter_name is not right.");
+}
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_op.h b/paddle/fluid/ir/dialect/pd_kernel_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fc297e8d656e663f5a4bed9513e46ef1b6ccdc3
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_op.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/ir/core/builder.h"
+#include "paddle/ir/core/op_base.h"
+
+namespace paddle {
+namespace dialect {
+
+class PhiKernelOp : public ir::Op<PhiKernelOp> {
+ public:
+  using Op::Op;
+  static const char *name() { return "phi.kernel"; }
+  static constexpr uint32_t attributes_num = 3;
+  static const char *attributes_name[attributes_num];
+  static void Verify(const std::vector<ir::OpResult> &inputs,
+                     const std::vector<ir::Type> &outputs,
+                     const ir::AttributeMap &attributes);
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.cc b/paddle/fluid/ir/dialect/pd_kernel_type.cc
new file mode 100644
index 0000000000000000000000000000000000000000..48fcca97d01c763f052d703f5e3f49b3ddf5d75a
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_type.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
+
+namespace paddle {
+namespace dialect {
+
+const phi::Place& AllocatedDenseTensorType::place() const {
+  return storage()->place_;
+}
+
+const ir::Type& AllocatedDenseTensorType::dtype() const {
+  return storage()->dense_tensor_type_.dtype();
+}
+
+const phi::DDim& AllocatedDenseTensorType::dims() const {
+  return storage()->dense_tensor_type_.dims();
+}
+
+const phi::DataLayout& AllocatedDenseTensorType::data_layout() const {
+  return storage()->dense_tensor_type_.data_layout();
+}
+
+const phi::LoD& AllocatedDenseTensorType::lod() const {
+  return storage()->dense_tensor_type_.lod();
+}
+
+const size_t& AllocatedDenseTensorType::offset() const {
+  return storage()->dense_tensor_type_.offset();
+}
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_type.h b/paddle/fluid/ir/dialect/pd_kernel_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0e80648fcb9c8b65f47e82992b4628e52737fe1
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_type.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
+#include "paddle/fluid/ir/dialect/pd_type.h"
+#include "paddle/ir/core/type.h"
+
+namespace paddle {
+namespace dialect {
+///
+/// \brief Define built-in parametric types.
+///
+class AllocatedDenseTensorType : public ir::Type {
+ public:
+  using Type::Type;
+
+  DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedDenseTensorType,
+                               AllocatedDenseTensorTypeStorage);
+
+  static AllocatedDenseTensorType get(ir::IrContext *ctx,
+                                      phi::Place place,
+                                      dialect::DenseTensorType type) {
+    return ir::TypeManager::template get<AllocatedDenseTensorType>(
+        ctx, place, type);
+  }
+
+  static AllocatedDenseTensorType get(ir::IrContext *ctx,
+                                      phi::Place place,
+                                      ir::Type dtype,
+                                      phi::DDim dims,
+                                      phi::DataLayout layout,
+                                      phi::LoD lod,
+                                      size_t offset) {
+    dialect::DenseTensorType dense_tensor_type =
+        dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset);
+
+    return ir::TypeManager::template get<AllocatedDenseTensorType>(
+        ctx, place, dense_tensor_type);
+  }
+
+  const phi::Place &place() const;
+
+  const ir::Type &dtype() const;
+
+  const phi::DDim &dims() const;
+
+  const phi::DataLayout &data_layout() const;
+
+  const phi::LoD &lod() const;
+
+  const size_t &offset() const;
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_kernel_type_storage.h b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbee9267545068031716752e97f35bbf9872ba9e
--- /dev/null
+++ b/paddle/fluid/ir/dialect/pd_kernel_type_storage.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <tuple>
+
+#include "paddle/fluid/ir/dialect/pd_type.h"
+#include "paddle/ir/core/type.h"
+#include "paddle/ir/core/utils.h"
+#include "paddle/phi/core/tensor_meta.h"
+
+namespace paddle {
+namespace dialect {
+///
+/// \brief Define Parametric TypeStorage for AllocatedDenseTensorType.
+///
+/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
+/// following methods: (1)declare ParamKey, (2)define Construction method,
+/// (3)define HashValue method, (4)overload operator==.
+///
+struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage {
+  using Place = phi::Place;
+  ///
+  /// \brief Declare ParamKey according to parameter type.
+  ///
+  using ParamKey = std::tuple<phi::Place, dialect::DenseTensorType>;
+
+  AllocatedDenseTensorTypeStorage(phi::Place place,
+                                  dialect::DenseTensorType type)
+      : place_(place), dense_tensor_type_(type) {}
+
+  ///
+  /// \brief Each derived TypeStorage must define a Construct method, which
+  /// StorageManager uses to construct a derived TypeStorage.
+  ///
+  static AllocatedDenseTensorTypeStorage *Construct(ParamKey key) {
+    return new AllocatedDenseTensorTypeStorage(std::get<0>(key),
+                                               std::get<1>(key));
+  }
+
+  ///
+  /// \brief Each derived TypeStorage must provide a HashValue method.
+  ///
+  static std::size_t HashValue(const ParamKey &key) {
+    std::size_t hash_value = 0;
+    // hash place
+    hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue());
+
+    // hash dense tensor type
+    auto dense_tensor_type = std::get<1>(key);
+    hash_value = ir::hash_combine(hash_value,
+                                  dialect::DenseTensorTypeStorage::HashValue(
+                                      dialect::DenseTensorTypeStorage::ParamKey(
+                                          dense_tensor_type.dtype(),
+                                          dense_tensor_type.dims(),
+                                          dense_tensor_type.data_layout(),
+                                          dense_tensor_type.lod(),
+                                          dense_tensor_type.offset())));
+    return hash_value;
+  }
+
+  ///
+  /// \brief Each derived TypeStorage needs to overload operator==.
+  ///
+  bool operator==(const ParamKey &key) const {
+    return ParamKey(place_, dense_tensor_type_) == key;
+  }
+
+  ParamKey GetAsKey() const { return ParamKey(place_, dense_tensor_type_); }
+
+  ///
+  /// \brief AllocatedDenseTensorTypeStorage includes two parameters: place
+  /// and DenseTensorType.
+  ///
+  phi::Place place_;
+  dialect::DenseTensorType dense_tensor_type_;
+};
+
+}  // namespace dialect
+}  // namespace paddle
diff --git a/paddle/fluid/ir/dialect/pd_type_storage.h b/paddle/fluid/ir/dialect/pd_type_storage.h
index dbdb3b374e4d223b89280e67bf27bf858bad2f81..c2de288f2a5922e665c7c8e93b205ce6e22489f7 100644
--- a/paddle/fluid/ir/dialect/pd_type_storage.h
+++ b/paddle/fluid/ir/dialect/pd_type_storage.h
@@ -112,6 +112,15 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
     return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key;
   }
 
+  bool operator==(const DenseTensorTypeStorage &storage) const {
+    return ParamKey(dtype_, dims_, layout_, lod_, offset_) ==
+           ParamKey(storage.dtype_,
+                    storage.dims_,
+                    storage.layout_,
+                    storage.lod_,
+                    storage.offset_);
+  }
+
   ParamKey GetAsKey() const {
     return ParamKey(dtype_, dims_, layout_, lod_, offset_);
   }
diff --git a/paddle/ir/core/storage_manager.cc b/paddle/ir/core/storage_manager.cc
index ff985f8e537d1dfe8049a99a80c0ef2465def715..41a52e85b3048abd903e34aab5a544e4f3c9d59a 100644
--- a/paddle/ir/core/storage_manager.cc
+++ b/paddle/ir/core/storage_manager.cc
@@ -75,8 +75,9 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl(
   VLOG(4) << "Try to get a parametric storage of: [TypeId_hash="
           << std::hash<ir::TypeId>()(type_id) << ", param_hash=" << hash_value
           << "].";
-  if (parametric_instance_.find(type_id) == parametric_instance_.end())
+  if (parametric_instance_.find(type_id) == parametric_instance_.end()) {
     throw("The input data pointer is null.");
+  }
   ParametricStorageManager &parametric_storage = *parametric_instance_[type_id];
   return parametric_storage.GetOrCreate(hash_value, equal_func, constructor);
 }
diff --git a/test/cpp/ir/core/CMakeLists.txt b/test/cpp/ir/core/CMakeLists.txt
index 111da5c3e29ef52d46b9db084d83d13315e445ac..f2d119742e168057d8a6a160814cceb63ab7dbba 100644
--- a/test/cpp/ir/core/CMakeLists.txt
+++ b/test/cpp/ir/core/CMakeLists.txt
@@ -12,6 +12,16 @@ cc_test_old(
   phi
   gtest)
 
+cc_test_old(
+  ir_phi_kernel_op_test
+  SRCS
+  ir_phi_kernel_op_test.cc
+  DEPS
+  new_ir
+  pd_dialect
+  phi
+  gtest)
+
 cc_test_old(
   ir_infershape_test
   SRCS
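Reviewer note: the TypeStorage contract followed by AllocatedDenseTensorTypeStorage above (declare ParamKey, define Construct, define HashValue, overload operator==) exists so that the storage manager can keep exactly one storage instance per distinct parameter combination. The self-contained sketch below illustrates that uniquing flow. It is an illustration only: ToyStorage, ToyManager, and the boost-style hash_combine are assumptions made for the example, not paddle/ir's actual StorageManager or ir::hash_combine.

// Illustrative only: a minimal sketch of how a storage manager presumably
// uses ParamKey, HashValue, operator== and Construct to unique a parametric
// type storage. Names and the hash mixing formula are assumptions.
#include <cstddef>
#include <functional>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>

static std::size_t hash_combine(std::size_t seed, std::size_t value) {
  // Common boost-style mixing; assumed to be close to ir::hash_combine.
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

struct ToyStorage {
  using ParamKey = std::tuple<std::string, int>;  // e.g. (place, rank)

  explicit ToyStorage(ParamKey key) : key_(std::move(key)) {}

  // (2) Construct: how a storage manager builds a storage from a ParamKey.
  static ToyStorage *Construct(ParamKey key) {
    return new ToyStorage(std::move(key));
  }

  // (3) HashValue: fold the hash of every parameter into one value.
  static std::size_t HashValue(const ParamKey &key) {
    std::size_t hash_value = 0;
    hash_value =
        hash_combine(hash_value, std::hash<std::string>()(std::get<0>(key)));
    hash_value = hash_combine(hash_value, std::hash<int>()(std::get<1>(key)));
    return hash_value;
  }

  // (4) operator==: resolves hash collisions within one bucket.
  bool operator==(const ParamKey &key) const { return key_ == key; }

  ParamKey key_;  // (1) ParamKey: the full parameter tuple of the type.
};

// One storage instance per distinct ParamKey, looked up by HashValue.
struct ToyManager {
  ToyStorage *GetOrCreate(const ToyStorage::ParamKey &key) {
    std::size_t hash = ToyStorage::HashValue(key);
    auto range = instances_.equal_range(hash);
    for (auto it = range.first; it != range.second; ++it) {
      if (*it->second == key) return it->second;  // already uniqued
    }
    ToyStorage *storage = ToyStorage::Construct(key);
    instances_.emplace(hash, storage);
    return storage;
  }
  std::unordered_multimap<std::size_t, ToyStorage *> instances_;
};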
diff --git a/test/cpp/ir/core/ir_phi_kernel_op_test.cc b/test/cpp/ir/core/ir_phi_kernel_op_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8bb37b8e922bedfd0ef5872e63d4d81ae605c71b
--- /dev/null
+++ b/test/cpp/ir/core/ir_phi_kernel_op_test.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+#include <sstream>
+
+#include "paddle/fluid/ir/dialect/pd_dialect.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
+#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
+#include "paddle/fluid/ir/dialect/utils.h"
+#include "paddle/fluid/ir/interface/op_yaml_info.h"
+#include "paddle/ir/core/block.h"
+#include "paddle/ir/core/builtin_attribute.h"
+#include "paddle/ir/core/builtin_dialect.h"
+#include "paddle/ir/core/builtin_op.h"
+#include "paddle/ir/core/ir_context.h"
+#include "paddle/ir/core/program.h"
+#include "paddle/ir/core/utils.h"
+#include "paddle/phi/core/meta_tensor.h"
+#include "paddle/phi/infermeta/binary.h"
+#include "paddle/phi/kernels/elementwise_add_kernel.h"
+
+TEST(program_test, program) {
+  // (1) Init environment.
+  ir::IrContext *ctx = ir::IrContext::Instance();
+  auto kernel_dialect =
+      ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
+  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
+
+  // (2) Create an empty program object
+  ir::Program program(ctx);
+
+  // (3) Create a float32 AllocatedDenseTensorType and check its members
+  phi::Place place(phi::AllocationType::CPU);
+  ir::Type fp32_dtype = ir::Float32Type::get(ctx);
+  phi::DDim dims = {2, 2};
+  phi::DataLayout data_layout = phi::DataLayout::NCHW;
+  phi::LoD lod = {{0, 1, 2}};
+  size_t offset = 0;
+
+  std::string op1_name = paddle::dialect::PhiKernelOp::name();
+
+  ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name);
+
+  std::unordered_map<std::string, ir::Attribute> op1_attribute{
+      {"parameter_name", ir::StrAttribute::get(ctx, "a")}};
+
+  auto allocated_dense_tensor_dtype =
+      paddle::dialect::AllocatedDenseTensorType::get(
+          ctx, place, fp32_dtype, dims, data_layout, lod, offset);
+  std::stringstream ss;
+  kernel_dialect->PrintType(allocated_dense_tensor_dtype, ss);
+  ASSERT_EQ(ss.str() == "cpu_tensor<2x2xf32>", true);
+  ASSERT_EQ(allocated_dense_tensor_dtype.place() == place, true);
+  ASSERT_EQ(allocated_dense_tensor_dtype.dims() == dims, true);
+  ASSERT_EQ(allocated_dense_tensor_dtype.data_layout() == data_layout, true);
+  ASSERT_EQ(allocated_dense_tensor_dtype.lod() == lod, true);
+  ASSERT_EQ(allocated_dense_tensor_dtype.offset() == 0, true);
+
+  ir::Operation *op1 = ir::Operation::Create(
+      {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info);
+
+  ASSERT_EQ(op1 != nullptr, true);
+}
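A possible follow-up check, not included in this change: parametric types built from identical parameters should be uniqued to a single storage, which is exactly what the HashValue and the new operator== on DenseTensorTypeStorage enable. A minimal sketch in the style of ir_phi_kernel_op_test.cc, assuming ir::Type's operator== compares the underlying uniqued storage:

// Hypothetical extra test, sketch only: reuses the includes of
// ir_phi_kernel_op_test.cc and assumes ir::Type::operator== compares the
// uniqued storage, so two gets with the same parameters yield the same type.
TEST(allocated_dense_tensor_type_test, uniquing) {
  ir::IrContext *ctx = ir::IrContext::Instance();
  ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();

  phi::Place place(phi::AllocationType::CPU);
  ir::Type fp32_dtype = ir::Float32Type::get(ctx);
  phi::DDim dims = {2, 2};
  phi::DataLayout data_layout = phi::DataLayout::NCHW;
  phi::LoD lod = {{0, 1, 2}};
  size_t offset = 0;

  auto t1 = paddle::dialect::AllocatedDenseTensorType::get(
      ctx, place, fp32_dtype, dims, data_layout, lod, offset);
  auto t2 = paddle::dialect::AllocatedDenseTensorType::get(
      ctx, place, fp32_dtype, dims, data_layout, lod, offset);

  // Same ParamKey (place + DenseTensorType) -> same uniqued storage.
  ASSERT_EQ(t1 == t2, true);
}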