Unverified commit bb848e6b authored by: H hong, committed by: GitHub

[IR]Lower pd op to kernel dialect (#54469)

* add kernel dialect

* change DenseTensorTypeStorage to DenseTensorType

* add test case

* add first pd_op to kernel dialect

* lower pd op to kernel dialect

* update

* update

* remove useless code

* add attribute print test

* fix bug

* polish code
Parent eac99c5b
add_subdirectory(interface)
add_subdirectory(dialect)
add_subdirectory(pass)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/ir/dialect/kernel_attribute_storage.h"
#include "paddle/ir/core/attribute.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace dialect {
class KernelAttribute : public ir::Attribute {
public:
using Attribute::Attribute;
DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(KernelAttribute, KernelAttributeStorage);
bool operator<(const KernelAttribute &right) const {
return storage() < right.storage();
}
phi::KernelKey data() const { return storage()->GetAsKey(); }
};
} // namespace dialect
} // namespace paddle
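A minimal usage sketch of this attribute, assuming an initialized ir::IrContext (the dialect_attr test at the end of this diff shows the full setup):

// Sketch: build a KernelAttribute from a phi::KernelKey and read it back.
ir::IrContext *ctx = ir::IrContext::Instance();
ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
phi::KernelKey kernel_key(
    phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);
auto attr = paddle::dialect::KernelAttribute::get(ctx, kernel_key);
phi::KernelKey round_trip = attr.data();  // backend/layout/dtype preserved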
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/core/attribute.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/kernel_factory.h"
namespace paddle {
namespace dialect {
struct KernelAttributeStorage : public ir::AttributeStorage {
using ParamKey = phi::KernelKey;
explicit KernelAttributeStorage(const ParamKey &key) { kernel_key_ = key; }
static KernelAttributeStorage *Construct(ParamKey key) {
return new KernelAttributeStorage(key);
}
static std::size_t HashValue(const ParamKey &key) {
auto t = phi::KernelKey::Hash()(key);
return t;
}
bool operator==(const ParamKey &key) const { return kernel_key_ == key; }
ParamKey GetAsKey() const { return kernel_key_; }
private:
phi::KernelKey kernel_key_;
};
} // namespace dialect
} // namespace paddle
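Since HashValue delegates to phi::KernelKey::Hash and operator== compares the stored key, attributes built from equal keys are uniqued to a single storage instance; a sketch of that contract (ctx as above):

// Equal ParamKeys hash and compare equal, so get() yields the same
// uniqued attribute for both keys.
phi::KernelKey k1(
    phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);
phi::KernelKey k2 = k1;
assert(paddle::dialect::KernelAttributeStorage::HashValue(k1) ==
       paddle::dialect::KernelAttributeStorage::HashValue(k2));
assert(paddle::dialect::KernelAttribute::get(ctx, k1) ==
       paddle::dialect::KernelAttribute::get(ctx, k2));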
......@@ -12,15 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
#include "paddle/fluid/ir/dialect/kernel_dialect.h"
#include "paddle/fluid/ir/dialect/kernel_op.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
// paddle/fluid/ir/dialect/CMakeLists.txt.
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/kernel_attribute.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
#include "paddle/fluid/ir/dialect/kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/ir/core/dialect_interface.h"
......@@ -38,13 +39,10 @@ void PaddleKernelDialect::initialize() {
RegisterTypes<paddle::dialect::AllocatedDenseTensorType>();
RegisterOps<dialect::PhiKernelOp>();
// RegisterAttributes<paddle::dialect::IntArrayAttribute,
// paddle::dialect::DataTypeAttribute,
// paddle::dialect::PlaceAttribute,
// paddle::dialect::DataLayoutAttribute>();
RegisterAttributes<paddle::dialect::KernelAttribute>();
}
void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) {
void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) const {
AllocatedDenseTensorType tensor_type =
type.dyn_cast<AllocatedDenseTensorType>();
......@@ -58,5 +56,13 @@ void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) {
os << ">";
}
void PaddleKernelDialect::PrintAttribute(ir::Attribute attr,
std::ostream &os) const {
phi::KernelKey kernel = attr.dyn_cast<KernelAttribute>().data();
os << "<backend:" << kernel.backend() << "|layout:" << kernel.layout()
<< "|dtype:" << kernel.dtype() << ">";
}
} // namespace dialect
} // namespace paddle
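The dialect_attr test at the end of this diff pins down the resulting format; for a CPU/ALL_LAYOUT/FLOAT32 kernel key:

std::stringstream ss;
kernel_dialect->PrintAttribute(attr, ss);
// ss.str() == "<backend:CPU|layout:Undefined(AnyLayout)|dtype:float32>"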
......@@ -27,7 +27,9 @@ class PaddleKernelDialect : public ir::Dialect {
static const char* name() { return "pd_kernel"; }
void PrintType(ir::Type type, std::ostream& os);
void PrintType(ir::Type type, std::ostream& os) const override;
void PrintAttribute(ir::Attribute attr, std::ostream& os) const override;
private:
void initialize();
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
#include "paddle/fluid/ir/dialect/kernel_op.h"
namespace paddle {
namespace dialect {
......@@ -23,7 +23,8 @@ const char *PhiKernelOp::attributes_name[attributes_num] = {
void PhiKernelOp::Verify(const std::vector<ir::OpResult> &inputs,
const std::vector<ir::Type> &outputs,
const ir::AttributeMap &attributes) {
VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp.";
VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";
// Verify inputs type:
// Verify if attributes contain attribute name in attributes_name:
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
namespace paddle {
namespace dialect {
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/fluid/ir/dialect/pd_kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/kernel_type_storage.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/ir/core/type.h"
......@@ -31,18 +31,18 @@ class AllocatedDenseTensorType : public ir::Type {
AllocatedDenseTensorTypeStorage);
static AllocatedDenseTensorType get(ir::IrContext *ctx,
phi::Place place,
const phi::Place &place,
dialect::DenseTensorType type) {
return ir::TypeManager::template get<AllocatedDenseTensorType>(
ctx, place, type);
}
static AllocatedDenseTensorType get(ir::IrContext *ctx,
phi::Place place,
ir::Type dtype,
phi::DDim dims,
phi::DataLayout layout,
phi::LoD lod,
const phi::Place &place,
const ir::Type &dtype,
const phi::DDim &dims,
const phi::DataLayout &layout,
const phi::LoD &lod,
size_t offset) {
dialect::DenseTensorType dense_tensor_type =
dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset);
......
......@@ -110,7 +110,8 @@ OpInfoTuple {op_name}::GetOpInfo() {{
std::vector<paddle::dialect::OpInputInfo> inputs = {{ {inputs} }};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {{ {attributes} }};
std::vector<paddle::dialect::OpOutputInfo> outputs = {{ {outputs} }};
paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{{inplace}}}, {{{view}}});
paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}});
return std::make_tuple(inputs, attributes, outputs, run_time_info);
}}
"""
......@@ -1488,9 +1489,14 @@ def OpGenerator(
kernel_func_str = ""
kernel_param_str = ""
kernel_key_dtype = ""
if op_kernel_map is not None:
kernel_func_str = '", "'.join(op_kernel_map['func'])
kernel_param_str = '", "'.join(op_kernel_map['param'])
if 'data_type' in op_kernel_map and op_kernel_map['data_type']:
kernel_key_dtype = '", "'.join(
op_kernel_map['data_type']['candidates']
)
inplace_str = ""
view_str = ""
......@@ -1513,6 +1519,7 @@ def OpGenerator(
infer_meta_param=infer_meta_param_str,
kernel_func=kernel_func_str,
kernel_param=kernel_param_str,
kernel_key_dtype=kernel_key_dtype,
inplace=inplace_str,
view=view_str,
)
......
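For illustration, a hedged sketch of the op_kernel_map shape this code expects; the field names come from the reads above, while the concrete values are hypothetical:

# Hypothetical parsed kernel section of an op's YAML entry. Only 'func',
# 'param' and the optional 'data_type' -> 'candidates' keys are read above.
op_kernel_map = {
    'func': ['uniform'],
    'param': ['shape', 'dtype', 'min', 'max', 'seed'],
    'data_type': {'candidates': ['dtype']},
}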
......@@ -112,15 +112,6 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key;
}
bool operator==(const DenseTensorTypeStorage &storage) const {
return ParamKey(dtype_, dims_, layout_, lod_, offset_) ==
ParamKey(storage.dtype_,
storage.dims_,
storage.layout_,
storage.lod_,
storage.offset_);
}
ParamKey GetAsKey() const {
return ParamKey(dtype_, dims_, layout_, lod_, offset_);
}
......
......@@ -144,18 +144,21 @@ struct OpRunTimeInfo {
std::vector<std::string> infer_meta_param;
std::vector<std::string> kernel_func;
std::vector<std::string> kernel_param;
std::vector<std::string> kernel_key_dtype;
std::vector<std::pair<std::string, std::string>> inplace;
std::vector<std::pair<std::string, std::string>> view;
OpRunTimeInfo(std::string infer_meta_func,
std::vector<std::string> infer_meta_param,
std::vector<std::string> kernel_func,
std::vector<std::string> kernel_param,
std::vector<std::string> dtype,
std::vector<std::pair<std::string, std::string>> inplace,
std::vector<std::pair<std::string, std::string>> view)
: infer_meta_func(infer_meta_func),
infer_meta_param(infer_meta_param),
kernel_func(kernel_func),
kernel_param(kernel_param),
kernel_key_dtype(dtype),
inplace(inplace),
view(view) {}
};
......
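A sketch of how a generated GetOpInfo (see the template earlier in this diff) fills the struct, with placeholder strings rather than a real op's YAML values:

// Placeholder values; the real ones come from the op's YAML entry.
paddle::dialect::OpRunTimeInfo run_time_info =
    paddle::dialect::OpRunTimeInfo("UnchangedInferMeta",  // infer_meta_func
                                   {"x"},     // infer_meta_param
                                   {"mean"},  // kernel_func
                                   {"x"},     // kernel_param
                                   {"x"},     // kernel_key_dtype (new field)
                                   {},        // inplace pairs
                                   {});       // view pairs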
......@@ -19,19 +19,15 @@
class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
public:
struct Concept {
explicit Concept(void (*infer_shape)(ir::Operation *,
phi::InferMetaContext *))
explicit Concept(void (*infer_shape)(phi::InferMetaContext *))
: infer_shape_(infer_shape) {}
void (*infer_shape_)(ir::Operation *, phi::InferMetaContext *);
void (*infer_shape_)(phi::InferMetaContext *);
};
template <class ConcreteOp>
struct Model : public Concept {
static void InferShape(ir::Operation *op,
phi::InferMetaContext *infer_meta) {
ConcreteOp concret_op = op->dyn_cast<ConcreteOp>();
if (concret_op == nullptr) throw("concret_op is nullptr");
concret_op.InferShape(infer_meta);
static void InferShape(phi::InferMetaContext *infer_meta) {
return ConcreteOp::InferShape(infer_meta);
}
Model() : Concept(InferShape) {}
......@@ -41,7 +37,7 @@ class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
: ir::OpInterfaceBase<InferShapeInterface>(op), impl_(impl) {}
void InferShape(phi::InferMetaContext *infer_meta) {
impl_->infer_shape_(operation(), infer_meta);
impl_->infer_shape_(infer_meta);
}
private:
......
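With InferShape now a static hook, the interface supports two call paths, both used later in this diff (sketch; context setup omitted):

// Via a concrete operation:
InferShapeInterface interface = op->dyn_cast<InferShapeInterface>();
interface.InferShape(&infer_meta_ctx);
// Or directly from the registered OpInfo, with no op cast needed
// (as run_kernel_prog does below):
auto *impl = op_info.GetInterfaceImpl<InferShapeInterface>();
impl->infer_shape_(&infer_meta_ctx);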
# All source files of pd_op_to_kernel_pass.
file(GLOB PD_PASS_SRCS "*.cc")
cc_library(
pd_op_to_kernel_pass
SRCS ${PD_PASS_SRCS}
DEPS new_ir phi_utils)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h"
#include "paddle/fluid/ir/dialect/kernel_attribute.h"
#include "paddle/fluid/ir/dialect/kernel_dialect.h"
#include "paddle/fluid/ir/dialect/kernel_op.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/kernel_factory.h"
namespace paddle {
namespace dialect {
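// GetKernelKey resolution order (summary of the logic below):
//   1. If the op's yaml runtime info lists kernel_key_dtype slots, take the
//      dtype from the matching input type or attribute.
//   2. Ops with no inputs (e.g. pd.full_) take their backend from the given
//      place.
//   3. Otherwise, fake DenseTensors are built from the lowered input types
//      and fed to KernelKeyParser to fill any still-undefined fields.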
phi::KernelKey GetKernelKey(
ir::Operation* op,
const phi::Place& place,
const std::unordered_map<ir::Value, ir::OpResult>& map_value_pair) {
paddle::dialect::OpYamlInfoInterface op_info_interface =
op->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
auto op_info_res = op_info_interface.GetOpInfo();
auto input_info = std::get<0>(op_info_res);
// only support non-vector inputs for now
std::map<std::string, int> input_map;
int index = 0;
for (auto& t : input_info) {
// TODO: filter attribute tensors
input_map[t.name] = index++;
}
std::map<std::string, std::string> attr_type_map;
auto attr_info = std::get<1>(op_info_res);
for (auto& t : attr_info) {
VLOG(6) << t.name << "\t" << t.type_name;
attr_type_map[t.name] = t.type_name;
}
auto runtime_info = std::get<3>(op_info_res);
// get dtype information
phi::Backend kernel_backend = phi::Backend::UNDEFINED;
phi::DataLayout kernel_layout = phi::DataLayout::UNDEFINED;
phi::DataType kernel_data_type = phi::DataType::UNDEFINED;
auto attr_map = op->attributes();
auto data_type_info = runtime_info.kernel_key_dtype;
if (data_type_info.size() > 0 && data_type_info[0] != "") {
// only a single input or attribute slot is supported for now
auto slot_name = data_type_info[0];
if (input_map.count(slot_name)) {
// parse from input
int in_index = input_map.at(slot_name);
dialect::AllocatedDenseTensorType type =
op->GetOperandByIndex(in_index)
.source()
.type()
.dyn_cast<paddle::dialect::AllocatedDenseTensorType>();
kernel_data_type = TransToPhiDataType(type.dtype());
} else {
PADDLE_ENFORCE_EQ(
attr_type_map.count(slot_name),
true,
phi::errors::PreconditionNotMet("[%s] MUST in attr map", slot_name));
kernel_data_type = attr_map.at(slot_name)
.dyn_cast<paddle::dialect::DataTypeAttribute>()
.data();
}
}
// parse all the input tensor
if (input_map.size() == 0 || op->name() == "pd.full_") {
// all the information has to come from attributes and the context
kernel_backend = paddle::experimental::ParseBackend(place);
} else {
paddle::experimental::detail::KernelKeyParser kernel_key_parser;
for (size_t i = 0; i < input_info.size(); ++i) {
// TODO: filter attribute tensors
auto input_tmp = op->GetOperandByIndex(i).source();
auto new_input_tmp = map_value_pair.at(input_tmp);
dialect::AllocatedDenseTensorType type =
new_input_tmp.type().dyn_cast<dialect::AllocatedDenseTensorType>();
// fake tensor here
auto ptr = new phi::Allocation(nullptr, 0, type.place());
std::shared_ptr<phi::Allocation> holder(ptr);
auto dtype = TransToPhiDataType(type.dtype());
phi::DenseTensorMeta meta(
dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
phi::DenseTensor fake_tensor(holder, meta);
kernel_key_parser.AssignKernelKeySet(fake_tensor);
}
auto kernel_key_set = kernel_key_parser.key_set;
auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
if (kernel_backend == phi::Backend::UNDEFINED) {
kernel_backend = kernel_key.backend();
}
if (kernel_layout == phi::DataLayout::UNDEFINED) {
kernel_layout = kernel_key.layout();
}
if (kernel_data_type == phi::DataType::UNDEFINED) {
kernel_data_type = kernel_key.dtype();
}
}
phi::KernelKey res(kernel_backend, kernel_layout, kernel_data_type);
return res;
}
std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog) {
auto program = std::make_unique<ir::Program>(ir::IrContext::Instance());
auto block = prog->block();
phi::Place cpu_place(phi::AllocationType::CPU);
ir::IrContext* ctx = ir::IrContext::Instance();
ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
std::unordered_map<ir::Operation*, ir::Operation*> map_op_pair;
std::unordered_map<ir::Value, ir::OpResult> map_value_pair;
std::string op1_name = paddle::dialect::PhiKernelOp::name();
ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name);
for (auto it = block->begin(); it != block->end(); ++it) {
auto kernel_key = GetKernelKey(*it, cpu_place, map_value_pair);
// create new Op
// only for single output
// need to update the new kernel key's layout and data type
auto allocated_dense_tensor_dtype =
paddle::dialect::AllocatedDenseTensorType::get(
ctx,
phi::TransToPhiPlace(kernel_key.backend()),
(*it)
->GetResultByIndex(0)
.type()
.dyn_cast<dialect::DenseTensorType>());
// construct inputs
std::vector<ir::OpResult> vec_inputs;
if ((*it)->name() != "pd.full_" && (*it)->num_operands() > 0) {
for (size_t i = 0; i < (*it)->num_operands(); ++i) {
auto cur_in = (*it)->GetOperandByIndex(i).source();
auto new_in = map_value_pair.at(cur_in);
vec_inputs.push_back(new_in);
}
}
paddle::dialect::OpYamlInfoInterface op_info_interface =
(*it)->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
auto op_info_res = op_info_interface.GetOpInfo();
auto runtime_info = std::get<3>(op_info_res);
std::unordered_map<std::string, ir::Attribute> op1_attribute{
{"op_name", ir::StrAttribute::get(ctx, (*it)->name())},
{"kernel_name",
ir::StrAttribute::get(ctx, runtime_info.kernel_func[0])},
{"kernel_key", dialect::KernelAttribute::get(ctx, kernel_key)}};
auto op_attr_map = (*it)->attributes();
for (auto it1 = op_attr_map.begin(); it1 != op_attr_map.end(); ++it1) {
op1_attribute.emplace(it1->first, it1->second);
}
ir::Operation* op1 = ir::Operation::Create(
vec_inputs, op1_attribute, {allocated_dense_tensor_dtype}, op1_info);
map_op_pair[*it] = op1;
map_value_pair[(*it)->GetResultByIndex(0)] = op1->GetResultByIndex(0);
program->block()->push_back(op1);
}
return program;
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/core/program.h"
namespace paddle {
namespace dialect {
std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog);
} // namespace dialect
} // namespace paddle
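Typical usage, taken from the ir_kernel_dialect_pass_test below: lower a pd-dialect program, then execute the result with the PhiKernelAdaptor:

auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program);
PhiKernelAdaptor phi_kernel_adaptor(&scope);
phi_kernel_adaptor.run_kernel_prog(kernel_program.get());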
......@@ -5,3 +5,4 @@ endif()
add_subdirectory(core)
add_subdirectory(pass)
add_subdirectory(pattern_rewrite)
add_subdirectory(kernel_dialect)
......@@ -103,10 +103,7 @@ TEST(program_test, program) {
bool res1 = simple_cmp(out_tensor.data<float>()[1], 1.70047);
bool res2 = simple_cmp(out_tensor.data<float>()[2], 1.56764);
bool res3 = simple_cmp(out_tensor.data<float>()[3], 1.85063);
std::cerr << out_tensor.data<float>()[0] << "\t"
<< out_tensor.data<float>()[1] << "\t"
<< out_tensor.data<float>()[2] << "\t"
<< out_tensor.data<float>()[3] << std::endl;
EXPECT_EQ(res0, true);
EXPECT_EQ(res1, true);
EXPECT_EQ(res2, true);
......
......@@ -15,10 +15,10 @@
#include <gtest/gtest.h>
#include <sstream>
#include "paddle/fluid/ir/dialect/kernel_dialect.h"
#include "paddle/fluid/ir/dialect/kernel_op.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_kernel_dialect.h"
#include "paddle/fluid/ir/dialect/pd_kernel_op.h"
#include "paddle/fluid/ir/dialect/pd_kernel_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/block.h"
......
......@@ -18,6 +18,7 @@
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/infershape.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_dialect.h"
......@@ -40,6 +41,7 @@
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/ir/dialect/kernel_attribute.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "glog/logging.h"
......@@ -92,14 +94,11 @@ template <typename T>
void build_context(ir::Operation* op,
const std::unordered_map<ir::Value, std::string>& name_map,
paddle::framework::Scope* scope,
const OpInfoTuple& op_yaml_info,
T* ctx,
bool is_infer_meta = true) {
paddle::dialect::OpYamlInfoInterface op_info_interface =
op->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
auto op_info_res = op_info_interface.GetOpInfo();
// inputs include input and mutable attributes
auto input_info = std::get<0>(op_info_res);
auto input_info = std::get<0>(op_yaml_info);
std::map<std::string, size_t> input_index_map;
std::map<std::string, std::string> mutable_attr_type_map;
int input_index = 0;
......@@ -111,7 +110,7 @@ void build_context(ir::Operation* op,
}
}
auto attr_info = std::get<1>(op_info_res);
auto attr_info = std::get<1>(op_yaml_info);
std::map<std::string, std::string> attr_type_map;
for (auto& t : attr_info) {
VLOG(6) << t.name << "\t" << t.type_name;
......@@ -119,7 +118,7 @@ void build_context(ir::Operation* op,
}
auto attr_map = op->attributes();
auto runtime_info = std::get<3>(op_info_res);
auto runtime_info = std::get<3>(op_yaml_info);
// int input_index = 0;
std::vector<std::string> vec_param_list;
......@@ -202,17 +201,18 @@ class PhiKernelAdaptor {
auto attr_map = (*it)->attributes();
paddle::dialect::OpYamlInfoInterface op_info_interface =
(*it)->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
auto op_info_res = op_info_interface.GetOpInfo();
InferShapeInterface interface = (*it)->dyn_cast<InferShapeInterface>();
phi::InferMetaContext ctx;
build_context<phi::InferMetaContext>((*it), name_map, scope_, &ctx);
build_context<phi::InferMetaContext>(
(*it), name_map, scope_, op_info_res, &ctx);
interface.InferShape(&ctx);
paddle::dialect::OpYamlInfoInterface op_info_interface =
(*it)->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
auto op_info_res = op_info_interface.GetOpInfo();
auto runtime_info = std::get<3>(op_info_res);
auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
......@@ -236,7 +236,7 @@ class PhiKernelAdaptor {
phi::KernelContext kernel_ctx(dev_ctx);
build_context<phi::KernelContext>(
(*it), name_map, scope_, &kernel_ctx, false);
(*it), name_map, scope_, op_info_res, &kernel_ctx, false);
found_it->second(&kernel_ctx);
auto out_value = (*it)->result(0);
......@@ -245,6 +245,58 @@ class PhiKernelAdaptor {
}
}
void run_kernel_prog(ir::Program* program) {
auto block = program->block();
std::unordered_map<ir::Value, std::string> name_map;
build_scope(block, scope_, &name_map);
ir::IrContext* ctx = ir::IrContext::Instance();
ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace());
phi::Place cpu_place(phi::AllocationType::CPU);
for (auto it = block->begin(); it != block->end(); ++it) {
auto attr_map = (*it)->attributes();
auto op_name = attr_map.at("op_name").dyn_cast<ir::StrAttribute>().data();
ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name);
auto impl =
op1_info.GetInterfaceImpl<paddle::dialect::OpYamlInfoInterface>();
auto yaml_info = impl->get_op_info_();
auto attr_info = std::get<1>(yaml_info);
auto infer_shape_impl = op1_info.GetInterfaceImpl<InferShapeInterface>();
phi::InferMetaContext ctx;
build_context<phi::InferMetaContext>(
(*it), name_map, scope_, yaml_info, &ctx);
infer_shape_impl->infer_shape_(&ctx);
auto kernel_name =
attr_map.at("kernel_name").dyn_cast<ir::StrAttribute>().data();
auto kernel_key = attr_map.at("kernel_key")
.dyn_cast<paddle::dialect::KernelAttribute>()
.data();
auto kernel_fn =
phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key);
phi::KernelContext kernel_ctx(dev_ctx);
build_context<phi::KernelContext>(
(*it), name_map, scope_, yaml_info, &kernel_ctx, false);
kernel_fn(&kernel_ctx);
auto out_value = (*it)->GetResultByIndex(0);
out_name = name_map[out_value];
}
}
std::string out_name;
private:
......
cc_test_old(
ir_kernel_dialect_pass_test
SRCS
ir_kernel_dialect_pass_test.cc
DEPS
pd_op_to_kernel_pass
new_ir
pd_dialect
phi
gtest)
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <sstream>
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_dialect.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "test/cpp/ir/core/phi_kernel_adaptor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/fluid/ir/dialect/kernel_dialect.h"
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(uniform, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
bool simple_cmp(float a, float b) { return std::abs((a - b) / a) < 1e-5; }
TEST(program_test, program) {
// (1) Init environment.
ir::IrContext* ctx = ir::IrContext::Instance();
ir::Program program((ctx));
ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
ir::Builder builder = ir::Builder::AtBlockEnd(ctx, program.block());
paddle::dialect::FullOp op1 = builder.Build<paddle::dialect::FullOp>(
std::vector<int64_t>{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace());
paddle::dialect::FullOp op2 = builder.Build<paddle::dialect::FullOp>(
std::vector<int64_t>{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace());
builder.Build<paddle::dialect::AddOp>(op1->GetResultByIndex(0),
op2->GetResultByIndex(0));
auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program);
paddle::framework::Scope scope;
PhiKernelAdaptor phi_kernel_adaptor(&scope);
phi_kernel_adaptor.run_kernel_prog(kernel_program.get());
auto out_tensor =
scope.Var(phi_kernel_adaptor.out_name)->Get<phi::DenseTensor>();
bool res0 = simple_cmp(out_tensor.data<float>()[0], 2.0);
bool res1 = simple_cmp(out_tensor.data<float>()[1], 2.0);
bool res2 = simple_cmp(out_tensor.data<float>()[2], 2.0);
bool res3 = simple_cmp(out_tensor.data<float>()[3], 2.0);
EXPECT_EQ(res0, true);
EXPECT_EQ(res1, true);
EXPECT_EQ(res2, true);
EXPECT_EQ(res3, true);
}
TEST(dialect_attr, attr) {
// (1) Init environment.
ir::IrContext* ctx = ir::IrContext::Instance();
ir::Program program((ctx));
ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
auto kernel_dialect =
ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
phi::KernelKey kernel_key(
phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32);
auto attr = paddle::dialect::KernelAttribute::get(ctx, kernel_key);
std::stringstream ss;
kernel_dialect->PrintAttribute(attr, ss);
EXPECT_EQ(
ss.str() == "<backend:CPU|layout:Undefined(AnyLayout)|dtype:float32>",
true);
}