未验证 提交 950a29b2 编写于 作者: H hong 提交者: GitHub

[IR]add kernel dialect (#54428)

* add kernel dialect

* change DenseTensorTypeStorage to DenseTensorType

* add test case
上级 b3da971d
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
// paddle/fluid/ir/dialect/CMakeLists.txt.
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/ir/core/dialect_interface.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace dialect {
// Constructs the kernel dialect and registers it with the given IrContext
// under the unique TypeId of PaddleKernelDialect.
PaddleKernelDialect::PaddleKernelDialect(ir::IrContext *context)
    : ir::Dialect(name(), context, ir::TypeId::get<PaddleKernelDialect>()) {
  // Register all types/ops owned by this dialect.
  initialize();
}
// Registers the types and operations owned by the kernel dialect.
void PaddleKernelDialect::initialize() {
  RegisterTypes<paddle::dialect::AllocatedDenseTensorType>();
  RegisterOps<dialect::PhiKernelOp>();

  // TODO: attribute registration is currently disabled — confirm whether the
  // kernel dialect should own these attributes or reuse the pd dialect's.
  // RegisterAttributes<paddle::dialect::IntArrayAttribute,
  //                    paddle::dialect::DataTypeAttribute,
  //                    paddle::dialect::PlaceAttribute,
  //                    paddle::dialect::DataLayoutAttribute>();
}
// Pretty-prints an AllocatedDenseTensorType as
// "<place>_tensor<DIMSxDTYPE>", e.g. "cpu_tensor<2x2xf32>".
// NOTE(review): dyn_cast is unchecked — a non-AllocatedDenseTensorType
// argument yields an invalid handle; confirm callers only pass this type.
void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) {
  auto allocated_type = type.dyn_cast<AllocatedDenseTensorType>();

  // Place prefix, e.g. "cpu_" / "gpu_".
  os << phi::AllocationTypeStr(allocated_type.place().GetType()) << "_tensor<";

  // Each dimension is followed by an 'x' separator, then the dtype closes
  // the shape, e.g. "2x2xf32".
  for (auto dim : phi::vectorize(allocated_type.dims())) {
    os << dim << "x";
  }
  allocated_type.dtype().Print(os);
  os << ">";
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/variable.h"
#include "paddle/ir/core/dialect.h"
#include "paddle/ir/core/parameter.h"
namespace paddle {
namespace dialect {
/// Dialect holding the phi-kernel-level IR: types carrying a concrete
/// allocation place (AllocatedDenseTensorType) and the PhiKernelOp wrapper.
class PaddleKernelDialect : public ir::Dialect {
 public:
  explicit PaddleKernelDialect(ir::IrContext* context);

  /// Namespace prefix of this dialect in the IR, e.g. "pd_kernel.xxx".
  static const char* name() { return "pd_kernel"; }

  /// Prints an AllocatedDenseTensorType as "<place>_tensor<dimsxdtype>".
  void PrintType(ir::Type type, std::ostream& os);

 private:
  // Registers the dialect's types and ops; called from the constructor.
  void initialize();
};
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
namespace paddle {
namespace dialect {
// Names of the attributes PhiKernelOp declares (must stay in sync with
// attributes_num in pd_kernel_op.h).
const char *PhiKernelOp::attributes_name[attributes_num] = {
    "base_op", "infermeta_fn", "kernel_fn"};
// Verifies inputs, outputs and attributes of a PhiKernelOp instance.
// Currently a stub: no constraints are enforced yet.
void PhiKernelOp::Verify(const std::vector<ir::OpResult> &inputs,
                         const std::vector<ir::Type> &outputs,
                         const ir::AttributeMap &attributes) {
  // Fix: the message previously said "SetParameterOp" (copy-paste from
  // another op's Verify); this op is PhiKernelOp.
  VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";

  // TODO: verify input types.

  // TODO: verify that `attributes` contains every name in attributes_name
  // ("base_op", "infermeta_fn", "kernel_fn") with the expected attribute
  // kinds, e.g.:
  // if (!attributes.at("base_op").isa<StrAttribute>()) {
  //   throw("Type of attribute: base_op is not right.");
  // }
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/core/builder.h"
#include "paddle/ir/core/op_base.h"
namespace paddle {
namespace dialect {
/// Operation wrapping a concrete phi kernel invocation in the IR.
/// Declared attributes: "base_op", "infermeta_fn", "kernel_fn".
class PhiKernelOp : public ir::Op<PhiKernelOp> {
 public:
  using Op::Op;

  /// Fully-qualified op name used for registration/lookup.
  static const char *name() { return "phi.kernel"; }

  // Number of declared attributes; attributes_name is defined in the .cc.
  static constexpr uint32_t attributes_num = 3;
  static const char *attributes_name[attributes_num];

  /// Checks inputs/outputs/attributes of an instance (currently a stub).
  static void Verify(const std::vector<ir::OpResult> &inputs,
                     const std::vector<ir::Type> &outputs,
                     const ir::AttributeMap &attributes);
};
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
namespace paddle {
namespace dialect {
// Device placement (CPU/GPU/...) this tensor type is bound to.
const phi::Place& AllocatedDenseTensorType::place() const {
  return storage()->place_;
}
// Element dtype, forwarded from the wrapped DenseTensorType.
const ir::Type& AllocatedDenseTensorType::dtype() const {
  return storage()->dense_tensor_type_.dtype();
}
// Tensor shape, forwarded from the wrapped DenseTensorType.
const phi::DDim& AllocatedDenseTensorType::dims() const {
  return storage()->dense_tensor_type_.dims();
}
// Memory layout (e.g. NCHW), forwarded from the wrapped DenseTensorType.
const phi::DataLayout& AllocatedDenseTensorType::data_layout() const {
  return storage()->dense_tensor_type_.data_layout();
}
// Level-of-detail info, forwarded from the wrapped DenseTensorType.
const phi::LoD& AllocatedDenseTensorType::lod() const {
  return storage()->dense_tensor_type_.lod();
}
// Element offset into the underlying buffer, forwarded from the wrapped
// DenseTensorType.
const size_t& AllocatedDenseTensorType::offset() const {
  return storage()->dense_tensor_type_.offset();
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/ir/core/type.h"
namespace paddle {
namespace dialect {
///
/// \brief Define built-in parametric types.
///
///
/// \brief A DenseTensorType bound to a concrete allocation place. This is the
/// parametric type of the kernel dialect (not an ir built-in type).
///
class AllocatedDenseTensorType : public ir::Type {
 public:
  using Type::Type;

  DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedDenseTensorType,
                               AllocatedDenseTensorTypeStorage);

  /// Uniqued construction from an existing DenseTensorType plus a place.
  static AllocatedDenseTensorType get(ir::IrContext *ctx,
                                      phi::Place place,
                                      dialect::DenseTensorType type) {
    return ir::TypeManager::template get<AllocatedDenseTensorType>(
        ctx, place, type);
  }

  /// Convenience overload: builds the inner DenseTensorType from its
  /// components, then wraps it with the place.
  static AllocatedDenseTensorType get(ir::IrContext *ctx,
                                      phi::Place place,
                                      ir::Type dtype,
                                      phi::DDim dims,
                                      phi::DataLayout layout,
                                      phi::LoD lod,
                                      size_t offset) {
    dialect::DenseTensorType dense_tensor_type =
        dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset);

    return ir::TypeManager::template get<AllocatedDenseTensorType>(
        ctx, place, dense_tensor_type);
  }

  // Accessors; all except place() forward to the wrapped DenseTensorType.
  const phi::Place &place() const;

  const ir::Type &dtype() const;

  const phi::DDim &dims() const;

  const phi::DataLayout &data_layout() const;

  const phi::LoD &lod() const;

  const size_t &offset() const;
};
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <type_traits>
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/ir/core/type.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/tensor_meta.h"
namespace paddle {
namespace dialect {
///
/// \brief Define Parametric TypeStorage for AllocatedDenseTensorType.
///
/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
/// following methods: (1)declare ParamKey, (2)define Construction method,
/// (3)define HashValue method, (4)overload operator==.
///
struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage {
  using Place = phi::Place;

  ///
  /// \brief Declare ParamKey according to parameter type.
  ///
  using ParamKey = std::tuple<phi::Place, dialect::DenseTensorType>;

  AllocatedDenseTensorTypeStorage(phi::Place place,
                                  dialect::DenseTensorType type)
      : place_(place), dense_tensor_type_(type) {}

  ///
  /// \brief Each derived TypeStorage must define a Construct method, which
  /// StorageManager uses to construct a derived TypeStorage.
  ///
  static AllocatedDenseTensorTypeStorage *Construct(ParamKey key) {
    return new AllocatedDenseTensorTypeStorage(std::get<0>(key),
                                               std::get<1>(key));
  }

  ///
  /// \brief Each derived TypeStorage must provide a HashValue method.
  /// Combines the place's hash with the hash of the inner DenseTensorType's
  /// full parameter tuple, so equal keys always hash equally.
  ///
  static std::size_t HashValue(const ParamKey &key) {
    std::size_t hash_value = 0;
    // hash place
    hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue());

    // hash dtype
    auto dense_tensor_type = std::get<1>(key);
    hash_value = ir::hash_combine(hash_value,
                                  dialect::DenseTensorTypeStorage::HashValue(
                                      dialect::DenseTensorTypeStorage::ParamKey(
                                          dense_tensor_type.dtype(),
                                          dense_tensor_type.dims(),
                                          dense_tensor_type.data_layout(),
                                          dense_tensor_type.lod(),
                                          dense_tensor_type.offset())));

    return hash_value;
  }

  ///
  /// \brief Each derived TypeStorage needs to overload operator==.
  ///
  bool operator==(const ParamKey &key) const {
    return ParamKey(place_, dense_tensor_type_) == key;
  }

  // Rebuilds this storage's ParamKey from its members.
  ParamKey GetAsKey() const { return ParamKey(place_, dense_tensor_type_); }

  ///
  /// \brief AllocatedDenseTensorTypeStorage holds two parameters: the
  /// allocation place and the wrapped DenseTensorType.
  ///
  phi::Place place_;

  dialect::DenseTensorType dense_tensor_type_;
};
} // namespace dialect
} // namespace paddle
......@@ -112,6 +112,15 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key;
}
// Two storages are equal iff all five parameters (dtype, dims, layout, lod,
// offset) compare equal; delegate to the key representation.
bool operator==(const DenseTensorTypeStorage &storage) const {
  return GetAsKey() == storage.GetAsKey();
}
// Rebuilds this storage's ParamKey from its members.
ParamKey GetAsKey() const {
  return ParamKey(dtype_, dims_, layout_, lod_, offset_);
}
......
......@@ -75,8 +75,9 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl(
VLOG(4) << "Try to get a parametric storage of: [TypeId_hash="
<< std::hash<ir::TypeId>()(type_id) << ", param_hash=" << hash_value
<< "].";
if (parametric_instance_.find(type_id) == parametric_instance_.end())
if (parametric_instance_.find(type_id) == parametric_instance_.end()) {
throw("The input data pointer is null.");
}
ParametricStorageManager &parametric_storage = *parametric_instance_[type_id];
return parametric_storage.GetOrCreate(hash_value, equal_func, constructor);
}
......
......@@ -12,6 +12,16 @@ cc_test_old(
phi
gtest)
# Unit test covering PaddleKernelDialect / AllocatedDenseTensorType /
# PhiKernelOp (see ir_phi_kernel_op_test.cc).
cc_test_old(
  ir_phi_kernel_op_test
  SRCS
  ir_phi_kernel_op_test.cc
  DEPS
  new_ir
  pd_dialect
  phi
  gtest)
cc_test_old(
ir_infershape_test
SRCS
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <sstream>
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/block.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_dialect.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
// End-to-end check of the kernel dialect: builds an AllocatedDenseTensorType,
// verifies its accessors and printed form, and creates a PhiKernelOp with it
// as the result type.
TEST(program_test, program) {
  // (1) Init environment.
  ir::IrContext *ctx = ir::IrContext::Instance();
  auto kernel_dialect =
      ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();

  // (2) Create an empty program object.
  ir::Program program(ctx);

  // (3) Build an AllocatedDenseTensorType from its components.
  phi::Place place(phi::AllocationType::CPU);
  ir::Type fp32_dtype = ir::Float32Type::get(ctx);
  phi::DDim dims = {2, 2};
  phi::DataLayout data_layout = phi::DataLayout::NCHW;
  phi::LoD lod = {{0, 1, 2}};
  size_t offset = 0;

  std::string op1_name = paddle::dialect::PhiKernelOp::name();
  ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name);
  // NOTE(review): "parameter_name" is not one of PhiKernelOp's declared
  // attributes (base_op/infermeta_fn/kernel_fn); creation succeeds only
  // because PhiKernelOp::Verify is currently a stub — confirm intended.
  std::unordered_map<std::string, ir::Attribute> op1_attribute{
      {"parameter_name", ir::StrAttribute::get(ctx, "a")}};

  auto allocated_dense_tensor_dtype =
      paddle::dialect::AllocatedDenseTensorType::get(
          ctx, place, fp32_dtype, dims, data_layout, lod, offset);

  // Fix: use ASSERT_EQ(actual, expected) / ASSERT_NE instead of
  // ASSERT_EQ(expr == value, true) so failures report both operands.
  std::stringstream ss;
  kernel_dialect->PrintType(allocated_dense_tensor_dtype, ss);
  ASSERT_EQ(ss.str(), "cpu_tensor<2x2xf32>");

  ASSERT_EQ(allocated_dense_tensor_dtype.place(), place);
  ASSERT_EQ(allocated_dense_tensor_dtype.dims(), dims);
  ASSERT_EQ(allocated_dense_tensor_dtype.data_layout(), data_layout);
  ASSERT_EQ(allocated_dense_tensor_dtype.lod(), lod);
  ASSERT_EQ(allocated_dense_tensor_dtype.offset(), 0u);

  ir::Operation *op1 = ir::Operation::Create(
      {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info);
  ASSERT_NE(op1, nullptr);
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册