Unverified commit 9137adb9, authored by zhangbo9674 and committed by GitHub

[IR] Refine PhiKernelOp attributes name and delete some unused code (#54891)

* refine code

* add some interface for phi kernel op

* fix compile bug

* delete unused code

* support code

* fix bug

* refine code

* delete unused code

* fix compile bug

* fix compile bug

* delete unused code

* add elementwise add op

* fix compile bug
Parent: f7bdd18b
@@ -967,8 +967,8 @@ void BuildOpFuncList(
       auto attr_info = std::get<1>(yaml_info);

-      op_func_node.infer_shape_interface_ =
-          op_info.GetInterfaceImpl<paddle::dialect::InferShapeInterface>();
+      op_func_node.infer_meta_interface_ =
+          op_info.GetInterfaceImpl<paddle::dialect::InferMetaInterface>();

       VLOG(6) << "op name" << op_func_node.phi_op_name_;
......
@@ -161,7 +161,7 @@ Instruction::Instruction(size_t id,
     is_artificial_ = true;
   }
-  if (op_func_node_.infer_shape_interface_ != nullptr) {
+  if (op_func_node_.infer_meta_interface_ != nullptr) {
     pre_define_context_ = true;
   }
   PADDLE_ENFORCE_GE(id,
......
@@ -20,7 +20,7 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/variable_helper.h"
-#include "paddle/fluid/ir/interface/infershape.h"
+#include "paddle/fluid/ir/interface/infermeta.h"
 #include "paddle/fluid/platform/device_event_base.h"
 #include "paddle/fluid/platform/event.h"
 #include "paddle/phi/core/utils/rw_lock.h"
@@ -177,8 +177,7 @@ struct OpFuncNode {
   phi::KernelContext kernel_context_;
   phi::InferMetaContext infer_meta_context_;
   std::string phi_op_name_;
-  paddle::dialect::InferShapeInterface::Concept* infer_shape_interface_{
-      nullptr};
+  paddle::dialect::InferMetaInterface::Concept* infer_meta_interface_{nullptr};
 };

 class Instruction {
......
@@ -963,7 +963,7 @@ void NewIRInterpreter::RunInstruction(const Instruction& instr_node) {
     VLOG(5) << "run new ir selected kernel";
     auto op_func_node = const_cast<OpFuncNode*>((instr_node.OpFunc()));
     VLOG(5) << "begin to run op " << op_func_node->phi_op_name_;
-    op_func_node->infer_shape_interface_->infer_shape_(
+    op_func_node->infer_meta_interface_->infer_meta_(
         &(op_func_node->infer_meta_context_));
     VLOG(5) << "after run infer meta";
     (*(op_func_node->phi_kernel_))(&(op_func_node->kernel_context_));
......
@@ -17,8 +17,10 @@ set(op_backward_yaml_file2
     ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/legacy_backward_ops.parsed.yaml
 )
 set(op_yaml_file3 ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/pd_op.yaml)
+set(op_yaml_file4
+    ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/pd_legacy_op.yaml)
 set(op_yaml_files
-    ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3}
+    ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3},${op_yaml_file4}
 )
 set(op_namespace paddle,dialect)
 set(dialect_name pd)
......
@@ -13,21 +13,57 @@
 // limitations under the License.

 #include "paddle/fluid/ir/dialect/kernel_op.h"
+#include "paddle/fluid/ir/dialect/kernel_attribute.h"
+#include "paddle/ir/core/builtin_attribute.h"
+#include "paddle/phi/core/enforce.h"

 namespace paddle {
 namespace dialect {

-const char *PhiKernelOp::attributes_name[attributes_num] = {
-    "base_op", "infermeta_fn", "kernel_fn"};
+const char* PhiKernelOp::attributes_name[attributes_num] = {
+    "op_name", "kernel_name", "kernel_key"};

 void PhiKernelOp::Verify() {
   VLOG(4) << "Verifying inputs, outputs and attributes for: PhiKernelOp.";
-  // Verify inputs type:
-
-  // Verify if attributes contain attribute name in attributes_name:
-  // if (!attributes.at("parameter_name").isa<StrAttribute>()) {
-  //   throw("Type of attribute: parameter_name is not right.");
+  auto& attributes = this->attributes();
+
+  PADDLE_ENFORCE(attributes.count("op_name") > 0 &&
+                     attributes.at("op_name").isa<ir::StrAttribute>(),
+                 phi::errors::PreconditionNotMet(
+                     "Type of attribute: op_name is not right."));
+
+  PADDLE_ENFORCE(attributes.count("kernel_name") > 0 &&
+                     attributes.at("kernel_name").isa<ir::StrAttribute>(),
+                 phi::errors::PreconditionNotMet(
+                     "Type of attribute: kernel_name is not right."));
+
+  PADDLE_ENFORCE(attributes.count("kernel_key") > 0 &&
+                     attributes.at("kernel_key").isa<KernelAttribute>(),
+                 phi::errors::PreconditionNotMet(
+                     "Type of attribute: kernel_key is not right."));
 }

+const std::string PhiKernelOp::op_name() {
+  return operation()
+      ->attributes()
+      .at("op_name")
+      .dyn_cast<ir::StrAttribute>()
+      .data();
+}
+const std::string PhiKernelOp::kernel_name() {
+  return operation()
+      ->attributes()
+      .at("kernel_name")
+      .dyn_cast<ir::StrAttribute>()
+      .data();
+}
+phi::KernelKey PhiKernelOp::kernel_key() {
+  return operation()
+      ->attributes()
+      .at("kernel_key")
+      .dyn_cast<KernelAttribute>()
+      .data();
+}
+
 }  // namespace dialect
......
@@ -16,6 +16,7 @@

 #include "paddle/ir/core/builder.h"
 #include "paddle/ir/core/op_base.h"
+#include "paddle/phi/core/kernel_factory.h"

 namespace paddle {
 namespace dialect {
@@ -26,6 +27,9 @@ class PhiKernelOp : public ir::Op<PhiKernelOp> {
   static const char *name() { return "phi.kernel"; }
   static constexpr uint32_t attributes_num = 3;
   static const char *attributes_name[attributes_num];
+  const std::string op_name();
+  const std::string kernel_name();
+  phi::KernelKey kernel_key();
   void Verify();
 };
......
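With the attributes renamed, callers can read them back through the new accessors instead of poking at the raw attribute map. A minimal sketch of consuming them (assuming `op` is an `ir::Operation*` already known to be a `phi.kernel` op, and that `ir::Operation::dyn_cast` works for concrete op classes as it does for interfaces elsewhere in this PR; the comment values are illustrative):

```cpp
// Sketch: consuming PhiKernelOp's renamed attributes via the new accessors.
auto kernel_op = op->dyn_cast<paddle::dialect::PhiKernelOp>();
const std::string op_name = kernel_op.op_name();          // e.g. "pd.full"
const std::string kernel_name = kernel_op.kernel_name();  // e.g. "full"
phi::KernelKey kernel_key = kernel_op.kernel_key();       // backend/layout/dtype

// The kernel_factory.h include added above makes this lookup available:
auto kernel_fn =
    phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key);
```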
@@ -39,7 +39,7 @@ H_FILE_TEMPLATE = """#ifdef GET_OP_LIST
 #include "paddle/fluid/ir/dialect/utils.h"
 #include "paddle/fluid/ir/dialect/op_yaml_info_util.h"
 #include "paddle/fluid/ir/interface/op_yaml_info.h"
-#include "paddle/fluid/ir/interface/infershape.h"
+#include "paddle/fluid/ir/interface/infermeta.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/phi/core/infermeta_utils.h"
@@ -143,7 +143,7 @@ void {op_name}::Build({build_args}) {{
 }}
 """

 OP_INFER_SHAPE_TEMPLATE = """
-void {op_name}::InferShape( phi::InferMetaContext *infer_meta ) {{
+void {op_name}::InferMeta( phi::InferMetaContext *infer_meta ) {{
   auto fn = PD_INFER_META(phi::{infer_meta_func});
   fn(infer_meta);
 }}
@@ -298,9 +298,9 @@ class OpInfoParser:
         self.infer_meta_map = self.parse_infer_meta_map()
         self.kernel_map = self.parse_kernel_map()
         if 'infer_meta' in self.op_yaml_item:
-            self.infer_shape_func = self.op_yaml_item['infer_meta']["func"]
+            self.infer_meta_func = self.op_yaml_item['infer_meta']["func"]
         else:
-            self.infer_shape_func = None
+            self.infer_meta_func = None

         # parse inplace && view
         self.inplace_map = self.parse_op_inplace_info()
@@ -1218,10 +1218,10 @@ def OpGenerator(
         op_traits = []

         exclusive_interface_str = ""
-        if op_info.infer_shape_func:
-            op_interfaces += ["InferShapeInterface"]
+        if op_info.infer_meta_func:
+            op_interfaces += ["InferMetaInterface"]
             exclusive_interface_str += (
-                "  static void InferShape( phi::InferMetaContext *infer_meta );"
+                "  static void InferMeta( phi::InferMetaContext *infer_meta );"
             )

         # If op has inplace info, we will generate inplace op and non-inplace op.
@@ -1472,11 +1472,11 @@ def OpGenerator(
             op_output_optional_list,
         )

-        op_infer_shape_str = ""
-        if op_info.infer_shape_func:
-            op_infer_shape_str = OP_INFER_SHAPE_TEMPLATE.format(
+        op_infer_meta_str = ""
+        if op_info.infer_meta_func:
+            op_infer_meta_str = OP_INFER_SHAPE_TEMPLATE.format(
                 op_name=op_class_name,
-                infer_meta_func=op_info.infer_shape_func,
+                infer_meta_func=op_info.infer_meta_func,
             )

         ops_name_list.append(op_class_name)
@@ -1487,7 +1487,7 @@ def OpGenerator(
         if len(op_mutable_attribute_name_list) > 0:
             ops_defined_list.append(build_func_with_muta_attr_is_input)
         ops_defined_list.append(op_verify_str)
-        ops_defined_list.append(op_infer_shape_str)
+        ops_defined_list.append(op_infer_meta_str)

         # (4) Generate head file str
         op_namespaces_prev = ""
......
- name: elementwise_add
  inputs:
  - typename: Tensor
    name: x
    optional: false
    no_need_buffer: false
    data_transform: {}
  - typename: Tensor
    name: y
    optional: false
    no_need_buffer: false
    data_transform: {}
  attrs:
  - {typename: int, name: axis}
  outputs:
  - {typename: Tensor, name: out, optional: false, intermediate: false}
  no_need_buffer: null
  data_transform: null
  infer_meta:
    func: ElementwiseInferMeta
    param: [x, y]
  kernel:
    func: [add_raw]
    param: [x, y]
    backend: null
    layout: null
    data_type: null
    dispatch: {add: null}
    force_backend: null
  inplace: {out: x}
  view: null
  backward: add_grad
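For an entry like this, the generator's `OP_INFER_SHAPE_TEMPLATE` (renamed above to emit `InferMeta`) would expand to roughly the following; the `ElementwiseAddOp` class name is assumed for illustration rather than copied from generated output:

```cpp
// Sketch of the generated InferMeta body for elementwise_add.
void ElementwiseAddOp::InferMeta(phi::InferMetaContext *infer_meta) {
  auto fn = PD_INFER_META(phi::ElementwiseInferMeta);
  fn(infer_meta);
}
```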
@@ -18,28 +18,28 @@
 namespace paddle {
 namespace dialect {
-class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
+class InferMetaInterface : public ir::OpInterfaceBase<InferMetaInterface> {
  public:
   struct Concept {
-    explicit Concept(void (*infer_shape)(phi::InferMetaContext *))
-        : infer_shape_(infer_shape) {}
-    void (*infer_shape_)(phi::InferMetaContext *);
+    explicit Concept(void (*infer_meta)(phi::InferMetaContext *))
+        : infer_meta_(infer_meta) {}
+    void (*infer_meta_)(phi::InferMetaContext *);
   };

   template <class ConcreteOp>
   struct Model : public Concept {
-    static void InferShape(phi::InferMetaContext *infer_meta) {
-      return ConcreteOp::InferShape(infer_meta);
+    static void InferMeta(phi::InferMetaContext *infer_meta) {
+      return ConcreteOp::InferMeta(infer_meta);
     }
-    Model() : Concept(InferShape) {}
+    Model() : Concept(InferMeta) {}
   };

-  InferShapeInterface(ir::Operation *op, Concept *impl)
-      : ir::OpInterfaceBase<InferShapeInterface>(op), impl_(impl) {}
+  InferMetaInterface(ir::Operation *op, Concept *impl)
+      : ir::OpInterfaceBase<InferMetaInterface>(op), impl_(impl) {}

-  void InferShape(phi::InferMetaContext *infer_meta) {
-    impl_->infer_shape_(infer_meta);
+  void InferMeta(phi::InferMetaContext *infer_meta) {
+    impl_->infer_meta_(infer_meta);
   }

  private:
@@ -49,4 +49,4 @@ class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
 }  // namespace dialect
 }  // namespace paddle

-IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::InferMetaInterface)
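A minimal sketch of how the renamed interface is meant to be driven (mirroring the updated `ir_infershape_test` further below): any op that declares `InferMetaInterface` can have its meta function invoked without knowing the concrete op type.

```cpp
// Sketch: type-erased InferMeta dispatch through the interface.
// Assumes `op` is an ir::Operation* whose op class lists InferMetaInterface.
paddle::dialect::InferMetaInterface interface =
    op->dyn_cast<paddle::dialect::InferMetaInterface>();

phi::InferMetaContext infer_meta_ctx;
// ... EmplaceBackInput / EmplaceBackAttr / EmplaceBackOutput as needed ...
interface.InferMeta(&infer_meta_ctx);  // routes to ConcreteOp::InferMeta
```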
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/ir/interface/infershape.h"
+#include "paddle/fluid/ir/interface/infermeta.h"
 #include "paddle/fluid/ir/interface/op_yaml_info.h"

-IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferShapeInterface)
+IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferMetaInterface)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::OpYamlInfoInterface)
@@ -19,7 +19,7 @@
 #include "paddle/fluid/ir/dialect/pd_op.h"
 #include "paddle/fluid/ir/dialect/pd_type.h"
 #include "paddle/fluid/ir/dialect/utils.h"
-#include "paddle/fluid/ir/interface/infershape.h"
+#include "paddle/fluid/ir/interface/infermeta.h"
 #include "paddle/fluid/ir/interface/op_yaml_info.h"
 #include "paddle/ir/core/builtin_attribute.h"
 #include "paddle/ir/core/builtin_dialect.h"
@@ -52,59 +52,6 @@ class PhiKernelAdaptor {
  public:
   explicit PhiKernelAdaptor(paddle::framework::Scope* scope) : scope_(scope) {}

-  void run(ir::Program* program) {
-    auto block = program->block();
-    std::unordered_map<ir::Value, std::string> name_map;
-    ir::BuildScope(block, scope_, &name_map);
-
-    auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace());
-    phi::Place cpu_place(phi::AllocationType::CPU);
-    for (auto it = block->begin(); it != block->end(); ++it) {
-      VLOG(6) << "begin to run op " << (*it)->name();
-
-      auto attr_map = (*it)->attributes();
-
-      paddle::dialect::OpYamlInfoInterface op_info_interface =
-          (*it)->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
-      auto op_info_res = op_info_interface.GetOpInfo();
-
-      paddle::dialect::InferShapeInterface interface =
-          (*it)->dyn_cast<paddle::dialect::InferShapeInterface>();
-      phi::InferMetaContext ctx;
-
-      ir::BuildInferMetaContext((*it), name_map, scope_, op_info_res, &ctx);
-
-      interface.InferShape(&ctx);
-
-      auto runtime_info = std::get<3>(op_info_res);
-
-      auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
-          runtime_info.kernel_func[0]);
-
-      phi::KernelKey kernel_key(phi::TransToPhiBackend(cpu_place),
-                                phi::DataLayout::ANY,
-                                phi::DataType::FLOAT32);
-      if (runtime_info.kernel_func[0] == "full_int_array") {
-        kernel_key.set_dtype(phi::DataType::INT64);
-      }
-      auto found_it = phi_kernels.find(kernel_key);
-      if (found_it == phi_kernels.end()) {
-        PADDLE_THROW(paddle::platform::errors::NotFound(
-            "can not found kerenl for [%s]", (*it)->name()));
-      } else {
-        phi::KernelContext kernel_ctx(dev_ctx);
-
-        ir::BuildPhiKernelContext(
-            (*it), name_map, scope_, op_info_res, &kernel_ctx);
-        found_it->second(&kernel_ctx);
-
-        auto out_value = (*it)->result(0);
-        out_name = name_map[out_value];
-      }
-    }
-  }
-
   void run_kernel_prog(ir::Program* program) {
     auto block = program->block();
     std::unordered_map<ir::Value, std::string> name_map;
@@ -128,14 +75,14 @@ class PhiKernelAdaptor {
       auto attr_info = std::get<1>(yaml_info);

-      auto infer_shape_impl =
-          op1_info.GetInterfaceImpl<paddle::dialect::InferShapeInterface>();
+      auto infer_meta_impl =
+          op1_info.GetInterfaceImpl<paddle::dialect::InferMetaInterface>();

       phi::InferMetaContext ctx;

       ir::BuildInferMetaContext((*it), name_map, scope_, yaml_info, &ctx);

-      infer_shape_impl->infer_shape_(&ctx);
+      infer_meta_impl->infer_meta_(&ctx);

       auto kernel_name =
           attr_map.at("kernel_name").dyn_cast<ir::StrAttribute>().data();
......
@@ -34,6 +34,7 @@
 #include "paddle/fluid/framework/string_array.h"
 #include "paddle/fluid/framework/tensor_ref_array.h"
 #include "paddle/fluid/ir/dialect/kernel_attribute.h"
+#include "paddle/fluid/ir/dialect/kernel_type.h"
 #include "paddle/fluid/ir/dialect/pd_attribute.h"
 #include "paddle/phi/core/enforce.h"
@@ -74,8 +75,6 @@ void BuildScope(ir::Block* block,
       // TODO(phlrain): need to update here, support StringTensor
       auto out_tensor = var->GetMutable<phi::DenseTensor>();

-      name_map->emplace(ptr, name);
-
       auto feed_var = scope->Var("feed");
       int index =
           (*it)->attributes().at("col").dyn_cast<ir::Int32Attribute>().data();
@@ -117,6 +116,8 @@ void BuildScope(ir::Block* block,
       continue;
     }

+    // TODO(zhangbo): support builtin.slice
+
     if (input_num > 0) {
       for (size_t i = 0; i < input_num; ++i) {
         auto ptr = (*it)->operand(i);
@@ -145,9 +146,29 @@ void BuildScope(ir::Block* block,
           name_map->emplace(ptr, name);
         }
         auto var = scope->Var(name);
-
-        // need to update here, only support DenseTensor
-        var->GetMutable<phi::DenseTensor>();
+        // Only support DenseTensor or Vector<DenseTensor>
+        if (ptr.type().isa<paddle::dialect::AllocatedDenseTensorType>()) {
+          var->GetMutable<phi::DenseTensor>();
+        } else if (ptr.type().isa<ir::VectorType>()) {
+          auto tensor_array =
+              var->GetMutable<paddle::framework::TensorRefArray>();
+          for (size_t i = 0; i < ptr.type().dyn_cast<ir::VectorType>().size();
+               i++) {
+            PADDLE_ENFORCE(
+                ptr.type()
+                    .dyn_cast<ir::VectorType>()[i]
+                    .isa<paddle::dialect::AllocatedDenseTensorType>(),
+                paddle::platform::errors::Fatal(
+                    "Element of VectorType output only support "
+                    "DenseTensorType"));
+            std::string name_i = "inner_var_" + std::to_string(count++);
+            auto var_i = scope->Var(name_i);
+            tensor_array->emplace_back(var_i->GetMutable<phi::DenseTensor>());
+          }
+        } else {
+          PADDLE_THROW(phi::errors::PreconditionNotMet(
+              "Output only support DenseTensorType or VectorType"));
+        }
       }
     }
   }
......
@@ -13,16 +13,6 @@ cc_test_old(
   phi
   gtest)

-cc_test_old(
-  ir_phi_kernel_op_test
-  SRCS
-  ir_phi_kernel_op_test.cc
-  DEPS
-  pd_dialect
-  ir
-  phi
-  gtest)
-
 cc_test_old(
   ir_infershape_test
   SRCS
@@ -38,6 +28,7 @@ cc_test_old(
   SRCS
   ir_exe_test.cc
   DEPS
+  pd_op_to_kernel_pass
   pd_dialect
   phi_kernel_adaptor
   ir
......
@@ -42,6 +42,7 @@
 #include "paddle/fluid/ir/dialect/pd_attribute.h"
+#include "paddle/fluid/ir/pass/pd_op_to_kernel_pass.h"
 #include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -93,9 +94,10 @@ TEST(program_test, program) {
   EXPECT_EQ(block->size(), 9u);

   // Execute program
+  auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program);
   paddle::framework::Scope scope;
   PhiKernelAdaptor phi_kernel_adaptor(&scope);
-  phi_kernel_adaptor.run(&program);
+  phi_kernel_adaptor.run_kernel_prog(kernel_program.get());

   auto out_tensor =
       scope.Var(phi_kernel_adaptor.out_name)->Get<phi::DenseTensor>();
@@ -159,9 +161,10 @@ TEST(program_test, mutable_attribute) {
   EXPECT_EQ(block->size(), 6u);
   // Execute program
+  auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program);
   paddle::framework::Scope scope;
   PhiKernelAdaptor phi_kernel_adaptor(&scope);
-  phi_kernel_adaptor.run(&program);
+  phi_kernel_adaptor.run_kernel_prog(kernel_program.get());

   auto out_tensor =
       scope.Var(phi_kernel_adaptor.out_name)->Get<phi::DenseTensor>();
......
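Both tests now follow the same two-step flow; condensed into a sketch (program construction elided, names as in the test above):

```cpp
// Sketch: lower a pd-dialect program to the kernel dialect, then execute it.
auto kernel_program = paddle::dialect::PdOpLowerToKernelPass(&program);

paddle::framework::Scope scope;
PhiKernelAdaptor phi_kernel_adaptor(&scope);
phi_kernel_adaptor.run_kernel_prog(kernel_program.get());

// The adaptor records the variable name of the last op's first result.
auto out_tensor =
    scope.Var(phi_kernel_adaptor.out_name)->Get<phi::DenseTensor>();
```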
@@ -32,21 +32,21 @@
 #include "paddle/phi/core/kernel_context.h"
 #include "paddle/phi/core/kernel_factory.h"

-#include "paddle/fluid/ir/interface/infershape.h"
+#include "paddle/fluid/ir/interface/infermeta.h"
 #include "paddle/fluid/platform/init.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/nullary.h"

 // Define op
 class OperationTest
-    : public ir::Op<OperationTest, paddle::dialect::InferShapeInterface> {
+    : public ir::Op<OperationTest, paddle::dialect::InferMetaInterface> {
  public:
   using Op::Op;
   static const char *name() { return "test.operation2"; }
   static constexpr uint32_t attributes_num = 2;
   static const char *attributes_name[attributes_num];
   static void Verify() {}
-  static void InferShape(phi::InferMetaContext *infer_meta) {
+  static void InferMeta(phi::InferMetaContext *infer_meta) {
     auto fn = PD_INFER_META(phi::CreateInferMeta);
     fn(infer_meta);
   }
@@ -87,15 +87,15 @@ TEST(infershape_test, infershape_test) {
   ir::Operation *op =
       ir::Operation::Create(op_inputs, {}, op_output_types, op_info);

-  paddle::dialect::InferShapeInterface interface =
-      op->dyn_cast<paddle::dialect::InferShapeInterface>();
+  paddle::dialect::InferMetaInterface interface =
+      op->dyn_cast<paddle::dialect::InferMetaInterface>();
   phi::InferMetaContext infer_meta_ctx;
   infer_meta_ctx.EmplaceBackAttr(phi::IntArray({5, 6}));
   infer_meta_ctx.EmplaceBackAttr(phi::DataType::FLOAT32);

   phi::DenseTensor tensor;
   infer_meta_ctx.EmplaceBackOutput(phi::MetaTensor(&tensor));
-  interface.InferShape(&infer_meta_ctx);
+  interface.InferMeta(&infer_meta_ctx);

   EXPECT_EQ(tensor.dims().size(), 2);
   EXPECT_EQ(tensor.dims()[0], 5);
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <sstream>
#include "paddle/fluid/ir/dialect/kernel_dialect.h"
#include "paddle/fluid/ir/dialect/kernel_op.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
#include "paddle/fluid/ir/dialect/op_yaml_info_util.h"
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/block.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_dialect.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
TEST(program_test, program) {
  // (1) Init environment.
  ir::IrContext *ctx = ir::IrContext::Instance();
  auto kernel_dialect =
      ctx->GetOrRegisterDialect<paddle::dialect::PaddleKernelDialect>();
  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();

  // (2) Create an empty program object
  ir::Program program(ctx);

  // (3) Create a float32 DenseTensor Parameter and save into Program
  phi::Place place(phi::AllocationType::CPU);
  ir::Type fp32_dtype = ir::Float32Type::get(ctx);
  phi::DDim dims = {2, 2};
  phi::DataLayout data_layout = phi::DataLayout::NCHW;
  phi::LoD lod = {{0, 1, 2}};
  size_t offset = 0;

  std::string op1_name = paddle::dialect::PhiKernelOp::name();
  ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name);

  std::unordered_map<std::string, ir::Attribute> op1_attribute{
      {"parameter_name", ir::StrAttribute::get(ctx, "a")}};

  auto allocated_dense_tensor_dtype =
      paddle::dialect::AllocatedDenseTensorType::get(
          ctx, place, fp32_dtype, dims, data_layout, lod, offset);

  std::stringstream ss;
  kernel_dialect->PrintType(allocated_dense_tensor_dtype, ss);
  ASSERT_EQ(ss.str() == "cpu_tensor<2x2xf32>", true);
  ASSERT_EQ(allocated_dense_tensor_dtype.place() == place, true);
  ASSERT_EQ(allocated_dense_tensor_dtype.dims() == dims, true);
  ASSERT_EQ(allocated_dense_tensor_dtype.data_layout() == data_layout, true);
  ASSERT_EQ(allocated_dense_tensor_dtype.lod() == lod, true);
  ASSERT_EQ(allocated_dense_tensor_dtype.offset() == 0, true);

  ir::Operation *op1 = ir::Operation::Create(
      {}, op1_attribute, {allocated_dense_tensor_dtype}, op1_info);

  ASSERT_EQ(op1 != nullptr, true);
}
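Note that this deleted test still built the op with a placeholder `parameter_name` attribute. Under the renamed scheme, a `phi.kernel` op would carry all three required attributes, roughly as below; the `KernelAttribute::get(ctx, key)` signature is assumed to mirror `ir::StrAttribute::get`, and the attribute values are illustrative:

```cpp
// Sketch: attribute map for a phi.kernel op under the renamed scheme.
phi::KernelKey kernel_key(
    phi::Backend::CPU, phi::DataLayout::ANY, phi::DataType::FLOAT32);
std::unordered_map<std::string, ir::Attribute> op_attributes{
    {"op_name", ir::StrAttribute::get(ctx, "pd.full")},
    {"kernel_name", ir::StrAttribute::get(ctx, "full")},
    {"kernel_key", paddle::dialect::KernelAttribute::get(ctx, kernel_key)}};
ir::Operation *op = ir::Operation::Create(
    {}, op_attributes, {allocated_dense_tensor_dtype}, op1_info);
```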
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/infershape.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_dialect.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/ir/dialect/kernel_attribute.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "glog/logging.h"
void BuildScope(ir::Block* block,
                paddle::framework::Scope* scope,
                std::unordered_map<ir::Value, std::string>* name_map) {
  std::unordered_map<ir::Value, int> map_test;

  int count = 0;
  for (auto it = block->begin(); it != block->end(); ++it) {
    int input = (*it)->num_operands();
    if (input > 0) {
      for (int i = 0; i < input; ++i) {
        auto ptr = (*it)->operand(i);
        std::string name;
        if (name_map->find(ptr) != name_map->end()) {
          name = name_map->at(ptr);
        } else {
          name = "var_" + std::to_string(count++);
          name_map->emplace(ptr, name);
        }
        auto var = scope->Var(name);
        // need to update here, only support DenseTensor
        var->GetMutable<phi::DenseTensor>();
      }
    }

    int out_num = (*it)->num_results();
    if (out_num > 0) {
      for (int i = 0; i < out_num; ++i) {
        ir::Value ptr = (*it)->result(i);
        std::string name;
        if (name_map->find(ptr) != name_map->end()) {
          name = name_map->at(ptr);
        } else {
          name = "var_" + std::to_string(count++);
          name_map->emplace(ptr, name);
        }
        auto var = scope->Var(name);
        var->GetMutable<phi::DenseTensor>();
      }
    }
  }
}
template <typename T>
void build_context(ir::Operation* op,
                   const std::unordered_map<ir::Value, std::string>& name_map,
                   paddle::framework::Scope* scope,
                   const OpInfoTuple& op_yaml_info,
                   T* ctx,
                   bool is_infer_meta = true) {
  // inputs include input and mutable attributes
  auto input_info = std::get<0>(op_yaml_info);
  std::map<std::string, size_t> input_index_map;
  std::map<std::string, std::string> mutable_attr_type_map;
  int input_index = 0;
  for (auto& t : input_info) {
    VLOG(6) << t.name << "\t" << t.type_name;
    input_index_map[t.name] = input_index++;
    if (t.is_mutable_attribute) {
      mutable_attr_type_map[t.name] = t.type_name;
    }
  }

  auto attr_info = std::get<1>(op_yaml_info);
  std::map<std::string, std::string> attr_type_map;
  for (auto& t : attr_info) {
    VLOG(6) << t.name << "\t" << t.type_name;
    attr_type_map[t.name] = t.type_name;
  }

  auto attr_map = op->attributes();
  auto runtime_info = std::get<3>(op_yaml_info);

  // int input_index = 0;
  std::vector<std::string> vec_param_list;
  if (is_infer_meta) {
    vec_param_list = runtime_info.infer_meta_param;
  } else {
    vec_param_list = runtime_info.kernel_param;
  }
  for (auto& t : vec_param_list) {
    if (input_index_map.count(t)) {
      // get information from input
      ir::Value ptr = op->operand(input_index_map[t]);
      auto in_var_name = name_map.at(ptr);

      if (mutable_attr_type_map.count(t)) {
        VLOG(6) << "ctx->EmplaceBack mutable attr: " << t << "\t"
                << in_var_name;
        if (mutable_attr_type_map[t] == "paddle::dialect::IntArrayAttribute") {
          ctx->EmplaceBackAttr(phi::IntArray(
              *(scope->Var(in_var_name)->GetMutable<phi::DenseTensor>())));
        } else if (mutable_attr_type_map[t] ==
                   "paddle::dialect::ScalarAttribute") {
          ctx->EmplaceBackAttr(phi::Scalar(
              *(scope->Var(in_var_name)->GetMutable<phi::DenseTensor>())));
        } else {
          PADDLE_THROW(phi::errors::Unimplemented("attr type not support [%s] ",
                                                  mutable_attr_type_map[t]));
        }
      } else {
        VLOG(6) << "ctx->EmplaceBackInput: " << t << "\t" << in_var_name;
        ctx->EmplaceBackInput(
            scope->Var(in_var_name)->GetMutable<phi::DenseTensor>());
      }
    }

    if (attr_type_map.count(t)) {
      auto type_name = attr_type_map[t];
      if (type_name == "paddle::dialect::IntArrayAttribute") {
        ctx->EmplaceBackAttr(
            attr_map[t].dyn_cast<paddle::dialect::IntArrayAttribute>().data());
      } else if (type_name == "paddle::dialect::DataTypeAttribute") {
        ctx->EmplaceBackAttr(
            attr_map[t].dyn_cast<paddle::dialect::DataTypeAttribute>().data());
      } else if (type_name == "ir::Int32Attribute") {
        ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int32Attribute>().data());
      } else if (type_name == "paddle::dialect::PlaceAttribute") {
        ctx->EmplaceBackAttr(
            attr_map[t].dyn_cast<paddle::dialect::PlaceAttribute>().data());
      } else if (type_name == "paddle::dialect::ScalarAttribute") {
        ctx->EmplaceBackAttr(
            attr_map[t].dyn_cast<paddle::dialect::ScalarAttribute>().data());
      } else {
        PADDLE_THROW(phi::errors::Unimplemented("attr type not support [%s] ",
                                                type_name));
      }
      VLOG(6) << "ctx->EmplaceBackAttr: " << t;
    }
  }

  ir::Value out_ptr = op->result(0);
  auto name = name_map.at(out_ptr);

  ctx->EmplaceBackOutput(scope->Var(name)->GetMutable<phi::DenseTensor>());
}
class PhiKernelAdaptor {
 public:
  explicit PhiKernelAdaptor(paddle::framework::Scope* scope) : scope_(scope) {}

  void run(ir::Program* program) {
    auto block = program->block();
    std::unordered_map<ir::Value, std::string> name_map;
    BuildScope(block, scope_, &name_map);

    auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace());
    phi::Place cpu_place(phi::AllocationType::CPU);
    for (auto it = block->begin(); it != block->end(); ++it) {
      VLOG(6) << "begin to run op " << (*it)->name();

      auto attr_map = (*it)->attributes();

      paddle::dialect::OpYamlInfoInterface op_info_interface =
          (*it)->dyn_cast<paddle::dialect::OpYamlInfoInterface>();
      auto op_info_res = op_info_interface.GetOpInfo();

      InferShapeInterface interface = (*it)->dyn_cast<InferShapeInterface>();
      phi::InferMetaContext ctx;

      build_context<phi::InferMetaContext>(
          (*it), name_map, scope_, op_info_res, &ctx);

      interface.InferShape(&ctx);

      auto runtime_info = std::get<3>(op_info_res);

      auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
          runtime_info.kernel_func[0]);

      phi::KernelKey kernel_key(phi::TransToPhiBackend(cpu_place),
                                phi::DataLayout::ANY,
                                phi::DataType::FLOAT32);
      if (runtime_info.kernel_func[0] == "full_int_array") {
        kernel_key.set_dtype(phi::DataType::INT64);
      }
      auto found_it = phi_kernels.find(kernel_key);
      if (found_it == phi_kernels.end()) {
        std::cerr << "kernel name " << runtime_info.kernel_func[0] << std::endl;
        std::cerr << "kernel key " << kernel_key.backend() << "\t"
                  << kernel_key.dtype() << "\t" << kernel_key.layout()
                  << std::endl;
        PADDLE_THROW(paddle::platform::errors::NotFound(
            "can not found kerenl for [%s]", (*it)->name()));
      } else {
        phi::KernelContext kernel_ctx(dev_ctx);

        build_context<phi::KernelContext>(
            (*it), name_map, scope_, op_info_res, &kernel_ctx, false);
        found_it->second(&kernel_ctx);

        auto out_value = (*it)->result(0);
        out_name = name_map[out_value];
      }
    }
  }

  void run_kernel_prog(ir::Program* program) {
    auto block = program->block();
    std::unordered_map<ir::Value, std::string> name_map;
    BuildScope(block, scope_, &name_map);
    ir::IrContext* ctx = ir::IrContext::Instance();

    ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();

    auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace());
    phi::Place cpu_place(phi::AllocationType::CPU);
    for (auto it = block->begin(); it != block->end(); ++it) {
      auto attr_map = (*it)->attributes();

      auto op_name = attr_map.at("op_name").dyn_cast<ir::StrAttribute>().data();

      ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name);

      auto impl =
          op1_info.GetInterfaceImpl<paddle::dialect::OpYamlInfoInterface>();
      auto yaml_info = impl->get_op_info_();

      auto attr_info = std::get<1>(yaml_info);

      auto infer_shape_impl = op1_info.GetInterfaceImpl<InferShapeInterface>();

      phi::InferMetaContext ctx;

      build_context<phi::InferMetaContext>(
          (*it), name_map, scope_, yaml_info, &ctx);

      infer_shape_impl->infer_shape_(&ctx);

      auto kernel_name =
          attr_map.at("kernel_name").dyn_cast<ir::StrAttribute>().data();
      auto kernel_key = attr_map.at("kernel_key")
                            .dyn_cast<paddle::dialect::KernelAttribute>()
                            .data();

      auto kernel_fn =
          phi::KernelFactory::Instance().SelectKernel(kernel_name, kernel_key);

      phi::KernelContext kernel_ctx(dev_ctx);

      build_context<phi::KernelContext>(
          (*it), name_map, scope_, yaml_info, &kernel_ctx, false);
      kernel_fn(&kernel_ctx);

      auto out_value = (*it)->result(0);
      out_name = name_map[out_value];
    }
  }

  std::string out_name;

 private:
  paddle::framework::Scope* scope_;
};