Unverified commit 3a6201af, authored by 王明冬, committed by GitHub

[infrt] add resnet50 unit test. test=develop (#40950)

Parent 7e05680c
@@ -13,6 +13,9 @@
 // limitations under the License.
 #include "paddle/infrt/host_context/paddle_mlir.h"
+#include <mlir/IR/OpDefinition.h>
+#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/pd/common/pd_ops_info.h"
@@ -95,13 +98,13 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelInputsType(
     std::vector<int64_t> dims = RepeatedToVector<int64_t>(
         var_desc.type().lod_tensor().tensor().dims());
     infrt::PrecisionType precision_;
-    ConvertDataTypeToPhi(
+    ConvertDataTypeToInfrt(
         var_desc.type().lod_tensor().tensor().data_type(), &precision_);
     mlir::Type type_ =
         infrt::DenseTensorType::get(context_,
                                     infrt::TargetType::CPU,
                                     precision_,
-                                    infrt::LayoutType::ANY);
+                                    infrt::LayoutType::NCHW);
     operandTypes.push_back(type_);
   }
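Note on the pattern above: the dims/precision/DenseTensorType boilerplate now appears in four places in this file (model inputs, model outputs, params, and op outputs), each pinning the layout to NCHW instead of ANY, since a convolutional model like resnet50 needs a concrete layout. As a sketch only — the helper name is hypothetical, but every type and call it uses appears elsewhere in this diff — the shared pattern could be factored as:

    // Hypothetical helper (not part of this commit): build the CPU/NCHW
    // DenseTensorType for a LoDTensor variable description.
    #include "paddle/infrt/host_context/paddle_mlir.h"

    static mlir::Type GetDenseTensorTypeOf(
        mlir::MLIRContext *context,
        const infrt::paddle::framework_proto::VarDesc &var_desc) {
      infrt::PrecisionType precision;
      ConvertDataTypeToInfrt(var_desc.type().lod_tensor().tensor().data_type(),
                             &precision);
      // Every call site in this commit uses NCHW rather than LayoutType::ANY.
      return infrt::DenseTensorType::get(
          context, infrt::TargetType::CPU, precision, infrt::LayoutType::NCHW);
    }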
@@ -125,13 +128,13 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelOutputsType(
     std::vector<int64_t> dims = RepeatedToVector<int64_t>(
         var_desc.type().lod_tensor().tensor().dims());
     infrt::PrecisionType precision_;
-    ConvertDataTypeToPhi(
+    ConvertDataTypeToInfrt(
         var_desc.type().lod_tensor().tensor().data_type(), &precision_);
     mlir::Type type_ =
         infrt::DenseTensorType::get(context_,
                                     infrt::TargetType::CPU,
                                     precision_,
-                                    infrt::LayoutType::ANY);
+                                    infrt::LayoutType::NCHW);
     resultTypes.push_back(type_);
   }
 }
@@ -179,10 +182,12 @@ void MLIRModelGenImpl::UpdateModelParams(
     std::vector<int64_t> dims = RepeatedToVector<int64_t>(
         var_desc.type().lod_tensor().tensor().dims());
     infrt::PrecisionType precision_;
-    ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(),
-                         &precision_);
-    mlir::Type type_ = infrt::DenseTensorType::get(
-        context_, infrt::TargetType::CPU, precision_, infrt::LayoutType::ANY);
+    ConvertDataTypeToInfrt(var_desc.type().lod_tensor().tensor().data_type(),
+                           &precision_);
+    mlir::Type type_ = infrt::DenseTensorType::get(context_,
+                                                   infrt::TargetType::CPU,
+                                                   precision_,
+                                                   infrt::LayoutType::NCHW);
     auto op = builder_.create<::infrt::phi::TensorMapGetTensorOp>(
         mlir::UnknownLoc::get(context_), type_, map, name);
     params_map_.insert(std::pair<std::string, mlir::Value>(
@@ -222,12 +227,42 @@ void MLIRModelGenImpl::UpdateModelOutputs(
 void MLIRModelGenImpl::buildOperation(
     const infrt::paddle::framework_proto::OpDesc &op_) {
-  const std::string &op_name = "pd." + op_.type();
+  const std::string op_name = "pd." + op_.type();
   mlir::Location loc = mlir::UnknownLoc::get(context_);
+
+  mlir::OperationState result(loc, op_name);
+  if (!result.name.isRegistered()) {
+    LOG(FATAL) << "Found unregistered operation: " << op_name;
+    return;
+  }
+  if (result.name.hasTrait<mlir::OpTrait::AttrSizedOperandSegments>()) {
+    LOG(FATAL) << "Operation " << op_name
+               << " has trait AttrSizedOperandSegments, which is currently"
+               << " not supported!";
+    return;
+  }
+
   llvm::SmallVector<mlir::Value, 4> operands = GetOpInputValue(op_);
+  // Drop empty operand slots left behind by the positional input mapping.
+  int empty_operand_cnt = 0;
+  for (auto it = operands.begin(); it != operands.end();) {
+    if (*it) {
+      ++it;
+    } else {
+      it = operands.erase(it);
+      ++empty_operand_cnt;
+    }
+  }
+  if (empty_operand_cnt > 1) {
+    LOG(FATAL) << "Operation " << op_name << " has " << empty_operand_cnt
+               << " empty operands; more than one empty operand is currently"
+               << " not supported!";
+    return;
+  }
+  result.addOperands(operands);
+
   llvm::SmallVector<mlir::Type, 4> resultTypes = GetOpOutputType(op_);
   llvm::SmallVector<mlir::NamedAttribute, 4> attrs = GetOpAttributes(op_);
-  mlir::OperationState result(loc, op_name, operands, resultTypes, attrs);
+  result.addTypes(resultTypes);
+  result.addAttributes(attrs);
   mlir::Operation *mlir_op_ = builder_.createOperation(result);
   RegisterOpOutputVars(op_, mlir_op_);
 }
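The operand-compaction loop above removes null values produced by GetOpInputValue's positional slots (next hunk). A self-contained sketch of the same erase-while-iterating pattern, using std::vector and int in place of llvm::SmallVector and mlir::Value:

    #include <cassert>
    #include <vector>

    int main() {
      // Stand-in for the operand list: 0 plays the role of an empty (null)
      // operand slot left behind by the positional mapping.
      std::vector<int> operands{7, 0, 42, 0};

      int empty_operand_cnt = 0;
      for (auto it = operands.begin(); it != operands.end();) {
        if (*it) {
          ++it;
        } else {
          it = operands.erase(it);  // erase returns the next valid iterator
          ++empty_operand_cnt;
        }
      }

      assert(operands.size() == 2 && empty_operand_cnt == 2);
      return 0;
    }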
@@ -239,11 +274,13 @@ llvm::SmallVector<mlir::Value, 4> MLIRModelGenImpl::GetOpInputValue(
   std::unordered_map<std::string, uint8_t> inputs_info = {};
   if (pd_dialect_inputs_info_map_.count(op_.type()))
     inputs_info = pd_dialect_inputs_info_map_.at(op_.type());
+  operands.resize(inputs_info.size());
   for (int var_idx = 0; var_idx < op_.inputs_size(); ++var_idx) {
     auto &var = op_.inputs(var_idx);
     if (!var.arguments().empty()) {
       if (!inputs_info.count(var.parameter())) continue;
-      operands.push_back((params_map_[var.arguments()[0]]));
+      operands[inputs_info.at(var.parameter())] =
+          params_map_[var.arguments()[0]];
     }
   }
   return operands;
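GetOpInputValue now places each named input at the operand index recorded in pd_dialect_inputs_info_map_ instead of appending in protobuf order; inputs absent from the proto leave empty slots, which is exactly what buildOperation's compaction loop cleans up. A standalone sketch of this positional mapping, with mocked-up names and values:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
      // Mock of inputs_info: each named parameter mapped to its fixed
      // position in the op's operand list.
      std::unordered_map<std::string, uint8_t> inputs_info{{"Input", 0},
                                                           {"Filter", 1}};

      // Inputs as they arrive from the proto, in arbitrary order.
      std::vector<std::pair<std::string, int>> proto_inputs{{"Filter", 42},
                                                            {"Input", 7}};

      // Resize first, then assign by position, as the new code does.
      std::vector<int> operands(inputs_info.size());
      for (const auto &var : proto_inputs) {
        if (!inputs_info.count(var.first)) continue;
        operands[inputs_info.at(var.first)] = var.second;
      }

      assert(operands[0] == 7 && operands[1] == 42);  // order fixed by the map
      return 0;
    }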
@@ -256,7 +293,7 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetOpOutputType(
   std::unordered_map<std::string, uint8_t> pd_dialect_outputs_info = {};
   if (pd_dialect_outputs_info_map_.count(op_.type()))
     pd_dialect_outputs_info = pd_dialect_outputs_info_map_.at(op_.type());
-
+  resultTypes.resize(pd_dialect_outputs_info.size());
   // update op outputs info
   for (int var_idx = 0; var_idx < op_.outputs_size(); ++var_idx) {
     auto &var_name = op_.outputs(var_idx).arguments()[0];
@@ -269,13 +306,14 @@ llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetOpOutputType(
     std::vector<int64_t> dims = RepeatedToVector<int64_t>(
         var_desc.type().lod_tensor().tensor().dims());
     infrt::PrecisionType precision_;
-    ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(),
-                         &precision_);
+    ConvertDataTypeToInfrt(
+        var_desc.type().lod_tensor().tensor().data_type(), &precision_);
     mlir::Type type_ = infrt::DenseTensorType::get(context_,
                                                    infrt::TargetType::CPU,
                                                    precision_,
-                                                   infrt::LayoutType::ANY);
-    resultTypes.push_back(type_);
+                                                   infrt::LayoutType::NCHW);
+    resultTypes[pd_dialect_outputs_info.at(
+        op_.outputs(var_idx).parameter())] = type_;
   }
 }
@@ -412,8 +450,8 @@ bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype,
   }
 }

-bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype,
-                          infrt::PrecisionType *type) {
+bool ConvertDataTypeToInfrt(infrt::paddle::framework_proto::VarType::Type dtype,
+                            infrt::PrecisionType *type) {
   switch (dtype) {
     case infrt::paddle::framework_proto::VarType::Type::VarType_Type_FP16:
       *type = infrt::PrecisionType::FLOAT16;
......
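The hunk above is truncated after the FP16 case. For illustration only — the enums below are mocks, not the real framework_proto or infrt enumerators — a dtype-to-precision mapping of this shape looks like:

    #include <cassert>

    // Standalone mock of the proto-dtype -> precision mapping pattern.
    enum class ProtoDType { FP16, FP32, INT32 };
    enum class PrecisionType { FLOAT16, FLOAT32, INT32, UNK };

    bool ConvertDataTypeToInfrt(ProtoDType dtype, PrecisionType *type) {
      switch (dtype) {
        case ProtoDType::FP16:
          *type = PrecisionType::FLOAT16;
          return true;
        case ProtoDType::FP32:
          *type = PrecisionType::FLOAT32;
          return true;
        case ProtoDType::INT32:
          *type = PrecisionType::INT32;
          return true;
        default:
          *type = PrecisionType::UNK;
          return false;  // caller can reject unsupported dtypes
      }
    }

    int main() {
      PrecisionType p = PrecisionType::UNK;
      assert(ConvertDataTypeToInfrt(ProtoDType::FP16, &p) &&
             p == PrecisionType::FLOAT16);
      return 0;
    }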
@@ -102,7 +102,7 @@ inline std::vector<T> RepeatedToVector(
 bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype,
                      mlir::Builder builder,
                      mlir::Type *type);
-bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype,
-                          infrt::PrecisionType *type);
+bool ConvertDataTypeToInfrt(infrt::paddle::framework_proto::VarType::Type dtype,
+                            infrt::PrecisionType *type);

 #endif  // PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_
@@ -37,8 +37,7 @@ template <typename KernelFunc,
           typename InferShapedFunc,
           InferShapedFunc infershape>
 void KernelLauncherFunc(host_context::KernelFrame* frame) {
-  static InferShapedKernelLauncher launcher(
-      FuncArgStatics<InferShapedFunc>::arg_size);
+  InferShapedKernelLauncher launcher(FuncArgStatics<InferShapedFunc>::arg_size);
   static const uint16_t num_input_tensors{InferShapeHelper<KernelFunc>::count};
   static const bool turn_on_infer_shape_cache{true};
......
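The change above turns the launcher from a function-local static into a per-call local. The commit does not state its motivation, but one plausible hazard of the static version — state constructed once and then shared by every later call (and by concurrent threads) — can be sketched with a mock launcher:

    #include <cassert>
    #include <vector>

    // Mock of InferShapedKernelLauncher: holds per-invocation argument state.
    struct Launcher {
      explicit Launcher(int arg_size) : values(arg_size, 0) {}
      std::vector<int> values;
    };

    int run_kernel(int arg_size, int x) {
      // Before this commit: `static Launcher launcher(arg_size);` was
      // constructed once, so later calls could observe state left behind by
      // earlier ones, and concurrent calls would race on it.
      Launcher launcher(arg_size);  // after the commit: fresh state per call
      launcher.values[0] = x;
      return launcher.values[0];
    }

    int main() {
      assert(run_kernel(4, 1) == 1);
      assert(run_kernel(4, 2) == 2);  // no stale state from the first call
      return 0;
    }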
-cc_test_tiny(test_abs_model SRCS model/test_abs.cc DEPS infrt ${MLIR_IR_LIBS})
+cc_test_tiny(test_abs_model SRCS models/test_abs.cc DEPS infrt ${MLIR_IR_LIBS})
configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py")
@@ -7,4 +7,5 @@ add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/resnet50.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/resnet50.mlir)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensorrt/disabled_linear.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensorrt/disabled_linear.mlir)
This diff is collapsed.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

import paddle
from paddle.jit import to_static
from paddle.static import InputSpec
from paddle.vision.models import resnet50

# Build resnet50 with pretrained weights and trace it to a static graph
# with a dynamic batch dimension.
model = resnet50(pretrained=True)
net = to_static(
    model, input_spec=[InputSpec(shape=[None, 3, 256, 256], name='x')])
paddle.jit.save(net, sys.argv[1])
@@ -46,7 +46,9 @@ function update_pd_ops() {
   python3 generate_phi_kernel_dialect.py
   # generate test model
   cd ${PADDLE_ROOT}
-  python3 paddle/infrt/tests/model/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
+  mkdir -p ${PADDLE_ROOT}/build/models
+  python3 paddle/infrt/tests/models/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
+  python3 paddle/infrt/tests/models/resnet50_model.py ${PADDLE_ROOT}/build/models/resnet50/model
 }

 function init() {
@@ -114,7 +116,7 @@ function create_fake_models() {
   python3 -m pip install *whl
   cd ${PADDLE_ROOT}/build
   python3 ${PADDLE_ROOT}/tools/infrt/fake_models/multi_fc.py
-  python3 ${PADDLE_ROOT}/paddle/infrt/tests/model/linear.py
+  python3 ${PADDLE_ROOT}/paddle/infrt/tests/models/linear.py
 }

 function test_infrt() {
......