// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/ir/dialect/op_yaml_info_util.h"
#include "paddle/fluid/ir/dialect/pd_dialect.h"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/utils.h"
#include "paddle/fluid/ir/interface/op_yaml_info.h"
#include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"
#include "paddle/ir/core/utils.h"
#include "paddle/phi/core/meta_tensor.h"

#include "paddle/fluid/framework/new_executor/interpreter/execution_config.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/phi/core/kernel_context.h"

#include "paddle/fluid/ir/dialect/kernel_attribute.h"
#include "paddle/fluid/ir/dialect/kernel_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/interface/op_yaml_info_parser.h"
#include "paddle/phi/core/infermeta_utils.h"

#include "glog/logging.h"

namespace ir {

// Builds scope variables for the values defined in `block` and fills the
// bookkeeping maps used by the new-IR executor.
void BuildScope(const ir::Block& block,
                paddle::framework::Scope* inner_scope,
                std::unordered_map<ir::Value, std::string>* value_2_var_name,
                std::unordered_map<const paddle::framework::Variable*,
                                   std::string>* variable_2_var_name,
                std::map<std::string, int>* var_name_2_id,
                std::vector<paddle::framework::Variable*>* variable_list);
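
// BuildPhiContext walks one ir::Operation and fills a phi context (an
// InferMeta or kernel context) with inputs, attributes, and outputs, using the
// op's YAML info to decide which parameters belong to the chosen signature.
//
// Template parameters (as used in the body below):
//   Context     - phi context type to fill, e.g. phi::KernelContext.
//   InType      - wrapper constructed around each single input tensor.
//   OutType     - wrapper constructed around each single output tensor.
//   InListType  - container emplaced for vector inputs.
//   OutListType - container emplaced for vector outputs.
//   is_kernel   - selects kernel vs. InferMeta parameter lists from YAML info.
//
// Rough instantiation sketch (illustrative only; the wrapper and list types
// used by actual callers may differ):
//
//   ir::BuildPhiContext<phi::KernelContext,
//                       const phi::TensorBase*,
//                       phi::TensorBase*,
//                       paddle::small_vector<const phi::TensorBase*>,
//                       paddle::small_vector<phi::TensorBase*>,
//                       true>(
//       op, name_map, scope, local_scope, op_yaml_info_parser, &kernel_ctx);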
template <typename Context,
          typename InType,
          typename OutType,
          typename InListType,
          typename OutListType,
          bool is_kernel>
void BuildPhiContext(
    ir::Operation* op,
    const std::unordered_map<ir::Value, std::string>& name_map,
    paddle::framework::Scope* scope,
    paddle::framework::Scope* local_scope,
    const paddle::dialect::OpYamlInfoParser& op_yaml_info,
    Context* ctx,
    std::map<std::string, std::vector<int>>* input_map = nullptr,
    std::map<std::string, std::vector<int>>* output_map = nullptr) {
  paddle::framework::Scope* inner_scope =
      local_scope != nullptr ? local_scope : scope;
  VLOG(6) << "BuildPhiContext in scope[" << scope << "] inner_scope["
          << inner_scope << "]";

  // inputs include input tensors and mutable (tensor-backed) attributes
  auto attr_map = op->attributes();
  auto& vec_kernel_fn_tensor_params = op_yaml_info.TensorParams(is_kernel);

  auto& name2id = op_yaml_info.InputName2Id();
  for (auto& t : vec_kernel_fn_tensor_params) {
    PADDLE_ENFORCE_EQ(
        name2id.count(t),
        true,
        phi::errors::NotFound("param [%s] MUST be in name2id map", t));
    auto index = op_yaml_info.InputName2Id().at(t);
    ir::Value ptr = op->operand(index);
    if (!ptr) {
      // optional input that is not set: emplace a null placeholder so the
      // argument positions stay aligned with the kernel signature
      phi::DenseTensor* ptr = nullptr;
      OutType in_ptr(ptr);
      ctx->EmplaceBackInput(in_ptr);
      continue;
    }
    auto in_var_name = name_map.at(ptr);
    VLOG(6) << "ctx->EmplaceBackInput: " << t << "\t" << in_var_name;

    PADDLE_ENFORCE_NOT_NULL(inner_scope->FindVar(in_var_name),
                            phi::errors::PreconditionNotMet(
                                "can not find var[%s] in scope", in_var_name));
    auto var = inner_scope->FindVar(in_var_name);
    if (var->IsType<phi::DenseTensor>()) {
      const phi::TensorBase* tensor_in = &(var->Get<phi::DenseTensor>());
      ctx->EmplaceBackInput(InType(tensor_in));
    } else if (var->IsType<paddle::framework::VariableRefArray>()) {
      InListType inputs;
      auto& variable_array = var->Get<paddle::framework::VariableRefArray>();
      for (size_t i = 0; i < variable_array.size(); ++i) {
        inputs.emplace_back(InType(const_cast<phi::DenseTensor*>(
            &(variable_array[i]->Get<phi::DenseTensor>()))));
      }
      ctx->EmplaceBackInputs(inputs);
    } else {
      PADDLE_THROW(phi::errors::Unimplemented("Unsupported var type [%d]",
                                              var->Type()));
    }
  }
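
  // Attribute parameters come in two forms: "mutable" attributes whose name
  // also appears in InputName2Id (their value lives in a scope variable and is
  // passed to the kernel as a phi::TensorRef), and ordinary compile-time
  // attributes that are unwrapped from the op's attribute map according to
  // their attribute type name.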
  auto& vec_kernel_fn_attr_params = op_yaml_info.AttrParams(is_kernel);
  for (auto& t : vec_kernel_fn_attr_params) {
    if (name2id.count(t)) {
      // tensor attribute, get information from input
      ir::Value ptr = op->operand(name2id.at(t));

      auto in_var_name = name_map.at(ptr);
      if (input_map != nullptr) {
        // only deal with single input for now, [todo] need support multi input
        // like concat
        // TODO(phlrain): OpFuncNode needs input_index and output_index;
        // construct them here and then remove them from OpFuncNode. Each
        // in_var_name is a fixed prefix followed by its numeric index, so
        // parse the trailing index out of the name.
        size_t tmp_id = std::atol(in_var_name.substr(4, 100).c_str());
        (*input_map)[std::to_string(name2id.at(t))].push_back(tmp_id);
      }

      auto& tensor_attr_type = op_yaml_info.TensorAttrTypeName(t);
      VLOG(6) << "ctx->EmplaceBack mutable attr: " << t << "\t" << in_var_name;
      if (tensor_attr_type == "paddle::dialect::IntArrayAttribute") {
        if (ptr.type().isa<paddle::dialect::AllocatedDenseTensorType>()) {
          phi::Attribute attr = phi::TensorRef(
              &(inner_scope->FindVar(in_var_name)->Get<phi::DenseTensor>()));
          ctx->EmplaceBackAttr(attr);
        } else if (ptr.type().isa<ir::VectorType>()) {
          auto& tensor_array = inner_scope->FindVar(in_var_name)
                                   ->Get<paddle::framework::VariableRefArray>();
          if (tensor_array.size() == 1) {
            phi::Attribute attr =
                phi::TensorRef(&(tensor_array[0]->Get<phi::DenseTensor>()));
            ctx->EmplaceBackAttr(attr);
          } else {
            std::vector<phi::TensorRef> vec_ref;
            for (size_t i = 0; i < tensor_array.size(); ++i) {
              vec_ref.emplace_back(
                  phi::TensorRef(&(tensor_array[i]->Get<phi::DenseTensor>())));
            }
            ctx->EmplaceBackAttr(vec_ref);
          }
        } else {
          PADDLE_THROW(phi::errors::Unimplemented(
              "[%s] only supports dense tensor and vector type",
              tensor_attr_type));
        }
      } else if (tensor_attr_type == "paddle::dialect::ScalarAttribute") {
        phi::Attribute r1 = phi::TensorRef(
            &(inner_scope->FindVar(in_var_name)->Get<phi::DenseTensor>()));
        ctx->EmplaceBackAttr(r1);
      } else {
        PADDLE_THROW(phi::errors::Unimplemented("attr type not supported [%s]",
                                                tensor_attr_type));
      }
      continue;
    }

    auto& attr_type_name = op_yaml_info.AttrTypeName(t);
    if (attr_type_name == "paddle::dialect::IntArrayAttribute") {
      ctx->EmplaceBackAttr(
          attr_map[t].dyn_cast<paddle::dialect::IntArrayAttribute>().data());
    } else if (attr_type_name == "paddle::dialect::DataTypeAttribute") {
      ctx->EmplaceBackAttr(
          attr_map[t].dyn_cast<paddle::dialect::DataTypeAttribute>().data());
    } else if (attr_type_name == "ir::Int32Attribute") {
      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int32Attribute>().data());
    } else if (attr_type_name == "ir::Int64Attribute") {
      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::Int64Attribute>().data());
    } else if (attr_type_name == "ir::FloatAttribute") {
      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::FloatAttribute>().data());
    } else if (attr_type_name == "ir::BoolAttribute") {
      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::BoolAttribute>().data());
    } else if (attr_type_name == "ir::StrAttribute") {
      ctx->EmplaceBackAttr(attr_map[t].dyn_cast<ir::StrAttribute>().data());
    } else if (attr_type_name ==
               "ir::ArrayAttribute<paddle::dialect::ScalarAttribute>") {
      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
      std::vector<phi::Scalar> vec_res;
      if (array_list.size() > 0) {
        PADDLE_ENFORCE_EQ(
            array_list[0].isa<paddle::dialect::ScalarAttribute>(),
            true,
            phi::errors::Unimplemented(
                "the 0th element MUST be dialect::ScalarAttribute"));
        for (size_t i = 0; i < array_list.size(); ++i) {
          vec_res.push_back(array_list[i]
                                .dyn_cast<paddle::dialect::ScalarAttribute>()
                                .data());
        }
      }
      ctx->EmplaceBackAttr(vec_res);
    } else if (attr_type_name == "ir::ArrayAttribute<ir::Int32Attribute>") {
      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
      std::vector<int32_t> vec_res;
      if (array_list.size() > 0) {
        PADDLE_ENFORCE_EQ(array_list[0].isa<ir::Int32Attribute>(),
                          true,
                          phi::errors::Unimplemented(
                              "the 0th element MUST be ir::Int32Attribute"));
        for (size_t i = 0; i < array_list.size(); ++i) {
          vec_res.push_back(
              array_list[i].dyn_cast<ir::Int32Attribute>().data());
        }
      }
      ctx->EmplaceBackAttr(vec_res);
    } else if (attr_type_name == "ir::ArrayAttribute<ir::FloatAttribute>") {
      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
      std::vector<float> vec_res;
      if (array_list.size() > 0) {
        if (array_list[0].isa<ir::FloatAttribute>()) {
          for (size_t i = 0; i < array_list.size(); ++i) {
            vec_res.push_back(
                array_list[i].dyn_cast<ir::FloatAttribute>().data());
          }
        } else {
          PADDLE_THROW(phi::errors::Unimplemented(
              "attr type not supported [%s]", attr_type_name));
        }
      }
      ctx->EmplaceBackAttr(vec_res);
    } else if (attr_type_name == "ir::ArrayAttribute<ir::Int64Attribute>") {
      auto array_list = attr_map[t].dyn_cast<ir::ArrayAttribute>().data();
      std::vector<int64_t> vec_res;
      if (array_list.size() > 0) {
        PADDLE_ENFORCE_EQ(
            array_list[0].isa<ir::Int64Attribute>(),
            true,
            phi::errors::PreconditionNotMet(
                "Element in array list MUST be ir::Int64Attribute"));
        for (size_t i = 0; i < array_list.size(); ++i) {
          vec_res.push_back(
              array_list[i].dyn_cast<ir::Int64Attribute>().data());
        }
      }
      ctx->EmplaceBackAttr(vec_res);
    } else if (attr_type_name == "paddle::dialect::PlaceAttribute") {
      ctx->EmplaceBackAttr(
          attr_map[t].dyn_cast<paddle::dialect::PlaceAttribute>().data());
    } else if (attr_type_name == "paddle::dialect::ScalarAttribute") {
      ctx->EmplaceBackAttr(
          attr_map[t].dyn_cast<paddle::dialect::ScalarAttribute>().data());
    } else {
      PADDLE_THROW(phi::errors::Unimplemented("attr type not supported [%s]",
                                              attr_type_name));
    }
    VLOG(6) << "ctx->EmplaceBackAttr: " << t;
  }
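
  // Outputs: "pd.fetch" is special-cased to write directly into the
  // interpreter's fetch list; every other op result is looked up in the scope
  // by name and emplaced as OutType (a missing result type becomes a null
  // placeholder so the output positions stay aligned).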
  // TODO(phlrain): use var type instead of op name
  if (op->attributes().count("op_name") &&
      (op->attributes().at("op_name").dyn_cast<ir::StrAttribute>().data() ==
       "pd.fetch")) {
    // process fetch op
    auto fetch_var = inner_scope->FindVar("fetch");
    auto* fetch_list = fetch_var->GetMutable<paddle::framework::FetchList>();
    int index =
        op->attributes().at("col").dyn_cast<ir::Int32Attribute>().data();
    auto* out_tensor = &(PADDLE_GET(phi::DenseTensor, fetch_list->at(index)));
    ctx->EmplaceBackOutput(out_tensor);
  } else {
    for (size_t i = 0; i < op->num_results(); ++i) {
      ir::Value out_ptr = op->result(i);
      auto name = name_map.at(out_ptr);
      VLOG(6) << "ctx->EmplaceBackOutput: " << name;
      auto out_type = out_ptr.type();
      if (!out_type) {
        phi::DenseTensor* ptr = nullptr;
        OutType out_ptr(ptr);
        ctx->EmplaceBackOutput(out_ptr);
      } else if (out_type.isa<paddle::dialect::AllocatedDenseTensorType>()) {
        ctx->EmplaceBackOutput(OutType(const_cast<phi::DenseTensor*>(
            &(inner_scope->FindVar(name)->Get<phi::DenseTensor>()))));
      } else if (out_type.isa<paddle::dialect::AllocatedSelectedRowsType>()) {
        ctx->EmplaceBackOutput(OutType(const_cast<phi::SelectedRows*>(
            &(inner_scope->FindVar(name)->Get<phi::SelectedRows>()))));
      } else if (out_type.isa<ir::VectorType>()) {
        OutListType outputs;
        auto& variable_array =
            scope->FindVar(name)->Get<paddle::framework::VariableRefArray>();
        for (size_t i = 0; i < variable_array.size(); ++i) {
          outputs.emplace_back(OutType(const_cast<phi::DenseTensor*>(
              &(variable_array[i]->Get<phi::DenseTensor>()))));
        }
        ctx->EmplaceBackOutputs(outputs);
      } else {
        PADDLE_THROW(phi::errors::Unimplemented(
            "only support DenseTensor and vector"));
      }

      if (output_map != nullptr) {
        // only deal with single input for now, [todo] need support multi
        // input like concat
        // TODO(phlrain): OpFuncNode needs input_index and output_index;
        // construct them here and then remove them from OpFuncNode. Parse the
        // trailing numeric index out of the variable name.
        size_t tmp_id = std::atol(name.substr(4, 100).c_str());
        (*output_map)["out"].push_back(tmp_id);
      }
    }
  }

  VLOG(6) << "Done build phi context";
}

}  // namespace ir