未验证 提交 50fad3ed 编写于 作者: H huzhiqiang 提交者: GitHub

update infrt script (#40670)

上级 34dfb0ec
......@@ -30,6 +30,8 @@ llvm::Optional<LayoutType> GetLayoutType(llvm::StringRef key) {
return LayoutType::NCHW;
else if (key.equals_insensitive("NHWC"))
return LayoutType::NHWC;
else if (key.equals_insensitive("ANY"))
return LayoutType::ANY;
else
return llvm::None;
}
......@@ -39,6 +41,8 @@ llvm::Optional<PrecisionType> GetPrecisionType(llvm::StringRef key) {
return PrecisionType::FLOAT32;
else if (key.equals_insensitive("FP16"))
return PrecisionType::FLOAT16;
else if (key.equals_insensitive("UNK"))
return PrecisionType::UNK;
else
return llvm::None;
}
......@@ -67,6 +71,9 @@ llvm::StringRef GetString(LayoutType type) {
case (LayoutType::NHWC):
str = "NHWC";
break;
case (LayoutType::ANY):
str = "ANY";
break;
default:
str = "Unsupported";
}
......@@ -82,6 +89,9 @@ llvm::StringRef GetString(PrecisionType type) {
case (PrecisionType::FLOAT16):
str = "FP16";
break;
case (PrecisionType::UNK):
str = "UNK";
break;
default:
str = "Unsupported";
}
......
......@@ -91,11 +91,15 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelInputsType(
if (var_desc.name() == input_var_name) {
std::vector<int64_t> dims = RepeatedToVector<int64_t>(
var_desc.type().lod_tensor().tensor().dims());
mlir::Type precision_;
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(),
builder_,
&precision_);
mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_);
infrt::PrecisionType precision_;
ConvertDataTypeToPhi(
var_desc.type().lod_tensor().tensor().data_type(), &precision_);
mlir::Type type_ =
infrt::DenseTensorType::get(context_,
infrt::TargetType::CPU,
precision_,
infrt::LayoutType::ANY);
operandTypes.push_back(type_);
}
}
......@@ -117,11 +121,14 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelOutputsType(
if (var_desc.name() == input_var_name) {
std::vector<int64_t> dims = RepeatedToVector<int64_t>(
var_desc.type().lod_tensor().tensor().dims());
mlir::Type precision_;
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(),
builder_,
&precision_);
mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_);
infrt::PrecisionType precision_;
ConvertDataTypeToPhi(
var_desc.type().lod_tensor().tensor().data_type(), &precision_);
mlir::Type type_ =
infrt::DenseTensorType::get(context_,
infrt::TargetType::CPU,
precision_,
infrt::LayoutType::ANY);
resultTypes.push_back(type_);
}
}
......@@ -168,15 +175,11 @@ void MLIRModelGenImpl::UpdateModelParams(
auto name = builder_.getStringAttr(var_desc.name());
std::vector<int64_t> dims = RepeatedToVector<int64_t>(
var_desc.type().lod_tensor().tensor().dims());
mlir::Type precision_;
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(),
builder_,
&precision_);
mlir::Type type_ =
infrt::DenseTensorType::get(context_,
infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW);
infrt::PrecisionType precision_;
ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(),
&precision_);
mlir::Type type_ = infrt::DenseTensorType::get(
context_, infrt::TargetType::CPU, precision_, infrt::LayoutType::ANY);
auto op = builder_.create<infrt::dt::TensorMapGetTensorOp>(
mlir::UnknownLoc::get(context_), type_, map, name);
params_map_.insert(std::pair<std::string, mlir::Value>(
......@@ -262,11 +265,13 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetOpOutputType(
if (var_desc.name() == var_name) {
std::vector<int64_t> dims = RepeatedToVector<int64_t>(
var_desc.type().lod_tensor().tensor().dims());
mlir::Type precision_;
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(),
builder_,
&precision_);
mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_);
infrt::PrecisionType precision_;
ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(),
&precision_);
mlir::Type type_ = infrt::DenseTensorType::get(context_,
infrt::TargetType::CPU,
precision_,
infrt::LayoutType::ANY);
resultTypes.push_back(type_);
}
}
......@@ -403,3 +408,38 @@ bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype,
return false;
}
}
// Translate a Paddle framework-proto data type into the matching
// infrt::PrecisionType used by the phi dialect.
//
// On success the mapped precision is written through `type` and true is
// returned; for any dtype without a phi equivalent, `type` is left
// untouched and false is returned.
bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype,
                          infrt::PrecisionType *type) {
  using ProtoType = infrt::paddle::framework_proto::VarType::Type;
  switch (dtype) {
    case ProtoType::VarType_Type_BOOL:
      *type = infrt::PrecisionType::BOOL;
      break;
    case ProtoType::VarType_Type_INT8:
      *type = infrt::PrecisionType::INT8;
      break;
    case ProtoType::VarType_Type_UINT8:
      *type = infrt::PrecisionType::UINT8;
      break;
    case ProtoType::VarType_Type_INT16:
      *type = infrt::PrecisionType::INT16;
      break;
    case ProtoType::VarType_Type_INT32:
      *type = infrt::PrecisionType::INT32;
      break;
    case ProtoType::VarType_Type_INT64:
      *type = infrt::PrecisionType::INT64;
      break;
    case ProtoType::VarType_Type_FP16:
      *type = infrt::PrecisionType::FLOAT16;
      break;
    case ProtoType::VarType_Type_FP32:
      *type = infrt::PrecisionType::FLOAT32;
      break;
    case ProtoType::VarType_Type_FP64:
      *type = infrt::PrecisionType::FLOAT64;
      break;
    default:
      // Unsupported dtype (e.g. complex, string): report failure.
      return false;
  }
  return true;
}
......@@ -102,4 +102,7 @@ inline std::vector<T> RepeatedToVector(
bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype,
mlir::Builder builder,
mlir::Type *type);
bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype,
infrt::PrecisionType *type);
#endif // PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_
......@@ -45,6 +45,7 @@ function update_pd_ops() {
python3 generate_pd_op_dialect_from_paddle_op_maker.py
python3 generate_phi_kernel_dialect.py
# generate test model
cd ${PADDLE_ROOT}
python3 paddle/infrt/tests/model/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
}
......
......@@ -191,6 +191,21 @@ def generate_all_ops_inputs_outputs_map(op_descs):
ops_inputs_outputs_head_file.write(cpp_style_ops_outputs_map_str)
def get_constraint(op_type, op_proto):
    """Build the MLIR trait list for the generated PD op definition.

    Every generated op carries the ``NoSideEffect`` trait.  When the op
    declares more than one optional input (dispensable, and neither extra
    nor intermediate), ``AttrSizedOperandSegments`` is appended so MLIR
    can distinguish the variable-sized operand groups.
    """
    inputs = op_proto[INPUTS]
    # Count the inputs that will be emitted as Optional<> operands.
    optional_inputs = sum(1 for name in inputs
                          if inputs[name][EXTRA] != True and
                          inputs[name][INTERMEDIATE] != True and
                          inputs[name][DISPENSABLE] == True)
    traits = "NoSideEffect"
    if optional_inputs > 1:
        traits += ", AttrSizedOperandSegments"
    return traits
# funtion to generate paddle op dialect file
def convert_op_proto_into_mlir(op_descs):
dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td"
......@@ -237,9 +252,11 @@ def convert_op_proto_into_mlir(op_descs):
if (op_type in skipped_op_list) or (op_type not in original_ops_):
continue
automatically_generated_op_dialect.append(op_type)
constraint_ = get_constraint(op_type, op_proto)
# 2.1 OpDef
HEAD = 'def PD_{op_type_capitalize}Op : PD_Op<"{op_type}", [NoSideEffect]> {left_brace}\n'.format(
HEAD = 'def PD_{op_type_capitalize}Op : PD_Op<"{op_type}", [{constraint}]> {left_brace}\n'.format(
op_type_capitalize=op_type.capitalize(),
constraint=constraint_,
op_type=op_type,
left_brace="{")
SUMMARY = ' let summary = "{} op";\n'.format(op_type)
......@@ -256,14 +273,22 @@ def convert_op_proto_into_mlir(op_descs):
ARGUMENTS = ""
if (len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0):
ARGUMENTS = " let arguments = (ins "
# 2.3.1 inputs
for input_ in op_proto[INPUTS]:
if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][
input_][INTERMEDIATE] != True:
if op_proto[INPUTS][input_][DUPLICABLE] != "true":
ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + ","
if op_proto[INPUTS][input_][DISPENSABLE] != True:
if op_proto[INPUTS][input_][DUPLICABLE] != True:
ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + ","
else:
ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
else:
ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
if op_proto[INPUTS][input_][DUPLICABLE] != True:
ARGUMENTS = ARGUMENTS + " Optional<PD_Tensor>:$" + input_ + ","
else:
ARGUMENTS = ARGUMENTS + " Optional<PD_Tensor_Array>:$" + input_ + ","
# unsupported: BLOCK = 8; BLOCKS = 10;
attr_mlir_converter = {
0: 'SI32Attr',
......@@ -332,7 +357,7 @@ def convert_op_proto_into_mlir(op_descs):
for output_ in op_proto[OUTPUTS]:
if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[
OUTPUTS][output_][INTERMEDIATE] != True:
if op_proto[OUTPUTS][output_][DUPLICABLE] != "true":
if op_proto[OUTPUTS][output_][DUPLICABLE] != True:
outputs = outputs + "PD_Tensor:${},".format(output_)
else:
outputs = outputs + "PD_Tensor_Array:${},".format(
......
......@@ -43,7 +43,8 @@ precision_type_converter = {
"float64": "FLOAT64",
"complex64": "COMPLEX64",
"complex128": "COMPLEX128",
"bool": "BOOL"
"bool": "BOOL",
"Undefined": "UNK"
}
kernel_types_info_file = "./kernels.json"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册