Unverified commit 9fc89b34, authored by huzhiqiang, committed by GitHub

Add model check (#40398)

Parent ac5cc136
......@@ -90,6 +90,9 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
return LoDTensorType::get(
parser.getContext(), shape, elementType, lod_level);
}
if (keyword == "dense_tensor_map") {
return DenseTensorMapType::get(parser.getContext());
}
if (keyword == "dense_tensor") {
// parse DenseTensor, for example: !infrt.dense_tensor<CPU, FP32, NCHW>
llvm::StringRef target;
......@@ -158,6 +161,10 @@ void InfrtDialect::printType(::mlir::Type type,
<< lod_tensor_type.getLod_level() << ">";
return;
}
if (type.isa<infrt::DenseTensorMapType>()) {
os << "dense_tensor_map";
return;
}
// print DenseTensorType, for example: !infrt.dense_tensor<CPU, FP32, NCHW>
if (type.isa<DenseTensorType>()) {
......
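As a sanity check on the new type, the printer must emit exactly the keyword the parser dispatches on. A minimal round-trip sketch follows (hypothetical test code, not part of this commit; the function name is illustrative):

// Hypothetical round-trip sketch: DenseTensorMapType should print back as
// "!infrt.dense_tensor_map", the same keyword parseType matches above.
#include <string>
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/MLIRContext.h"
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"

void CheckDenseTensorMapRoundTrip() {
  mlir::MLIRContext ctx;
  ctx.getOrLoadDialect<::infrt::InfrtDialect>();
  mlir::Type type = infrt::DenseTensorMapType::get(&ctx);
  std::string repr;
  llvm::raw_string_ostream os(repr);
  type.print(os);  // expected textual form: "!infrt.dense_tensor_map"
}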
......@@ -13,15 +13,17 @@
// limitations under the License.
#include "paddle/infrt/host_context/paddle_mlir.h"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops_info.h"
MLIRModelGenImpl::MLIRModelGenImpl()
: context_(infrt::Global::getMLIRContext()), builder_(context_) {
context_->allowUnregisteredDialects();
context_->getOrLoadDialect<mlir::StandardOpsDialect>();
context_->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
context_->getOrLoadDialect<infrt::dt::DTDialect>();
context_->getOrLoadDialect<mlir::pd::PaddleDialect>();
context_->getOrLoadDialect<::infrt::InfrtDialect>();
module_ = mlir::ModuleOp::create(mlir::UnknownLoc::get(context_));
}
......@@ -55,7 +57,6 @@ mlir::ModuleOp MLIRModelGenImpl::ImportPaddleModel(
UpdateModelParams(program, &mainFunc);
UpdateModelOps(program);
UpdateModelOutputs(program);
return module_;
}
......@@ -171,7 +172,11 @@ void MLIRModelGenImpl::UpdateModelParams(
ConvertDataType(var_desc.type().lod_tensor().tensor().data_type(),
builder_,
&precision_);
-  mlir::Type type_ = mlir::RankedTensorType::get(dims, precision_);
+  mlir::Type type_ =
+      infrt::DenseTensorType::get(context_,
+                                  infrt::TargetType::CPU,
+                                  infrt::PrecisionType::FLOAT32,
+                                  infrt::LayoutType::NCHW);
auto op = builder_.create<infrt::dt::TensorMapGetTensorOp>(
mlir::UnknownLoc::get(context_), type_, map, name);
params_map_.insert(std::pair<std::string, mlir::Value>(
......@@ -197,8 +202,9 @@ void MLIRModelGenImpl::UpdateModelOutputs(
llvm::SmallVector<mlir::Type, 4> resultTypes;
llvm::SmallVector<mlir::NamedAttribute, 4> attrs;
mlir::OperationState state(loc,
-                             mlir::ReturnOp::getOperationName(),
+                             ::infrt::ReturnOp::getOperationName(),
operands,
resultTypes,
attrs);
......@@ -321,7 +327,7 @@ llvm::SmallVector<mlir::NamedAttribute, 4> MLIRModelGenImpl::GetOpAttributes(
switch (type) {
ATTR_IMPL_CASE(FLOAT, f, getF32FloatAttr);
ATTR_IMPL_CASE(BOOLEAN, b, getBoolAttr);
-    ATTR_IMPL_CASE(INT, i, getI32IntegerAttr);
+    ATTR_IMPL_CASE(INT, i, getSI32IntegerAttr);
ATTR_IMPL_CASE(LONG, l, getI64IntegerAttr);
ATTR_IMPL_CASE(STRING, s, getStringAttr);
......
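One behavioral note on the hunk above: getSI32IntegerAttr builds an attribute of the explicitly signed type si32, whereas getI32IntegerAttr builds a signless i32. A small illustration (hypothetical snippet, assuming an existing mlir::MLIRContext named ctx):

// Hypothetical snippet contrasting signless and signed integer attributes.
mlir::Builder b(&ctx);
mlir::IntegerAttr signless = b.getI32IntegerAttr(-3);      // type: i32 (signless)
mlir::IntegerAttr signed_attr = b.getSI32IntegerAttr(-3);  // type: si32 (signed)
// signed_attr.getType().isSignedInteger(32) evaluates to true.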
cc_test_tiny(test_abs_model SRCS model/test_abs.cc DEPS infrt ${MLIR_IR_LIBS})
configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py")
add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests --filter-out \"disabled_*\""
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.nn import Layer
from paddle.static import InputSpec
from paddle.jit import to_static
import sys
class AbsNet(paddle.nn.Layer):
    def __init__(self):
        super(AbsNet, self).__init__()

    def forward(self, x):
        x = paddle.abs(x)
        return x


if __name__ == '__main__':
    # build network
    model = AbsNet()
    # save the model in inference format
    net = to_static(
        model, input_spec=[InputSpec(
            shape=[None, 1, 28, 28], name='x')])
    paddle.jit.save(net, sys.argv[1])
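For context on the file names used below: paddle.jit.save(net, prefix) writes <prefix>.pdmodel and <prefix>.pdiparams, which is where the test's ./abs.pdmodel and ./abs.pdiparams paths come from.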
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
#include "llvm/Support/DynamicLibrary.h"
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/host_context/core_runtime.h"
#include "paddle/infrt/host_context/kernel_registry.h"
#include "paddle/infrt/host_context/mlir_to_runtime_translate.h"
#include "paddle/infrt/kernel/basic_kernels.h"
#include "paddle/infrt/kernel/control_flow_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
#include "paddle/infrt/kernel/phi/registry.h"
#include "paddle/infrt/kernel/tensor_kernels.h"
#include "paddle/infrt/kernel/tensor_shape_kernels.h"
#include "paddle/infrt/kernel/test_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/infershaped_utils.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h"
#include "paddle/infrt/host_context/paddle_mlir.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
#include "paddle/infrt/dialect/phi/ir/phi_base.h"
#include "paddle/infrt/dialect/phi/ir/phi_kernels.h"
static llvm::cl::list<std::string> cl_shared_libs( // NOLINT
"shared_libs",
llvm::cl::desc("Specify shared library with kernels."),
llvm::cl::ZeroOrMore,
llvm::cl::MiscFlags::CommaSeparated);
TEST(ABS_MODEL, convert_and_execute) {
std::string model_file_name = "./abs.pdmodel";
std::string params_file_name = "./abs.pdiparams";
// convert model
MLIRModelGenImpl myGen;
auto module_ = myGen.ImportPaddleModel(model_file_name, params_file_name);
module_.dump();
// pick kernel
mlir::MLIRContext* context = infrt::Global::getMLIRContext();
context->allowUnregisteredDialects();
context->getOrLoadDialect<mlir::StandardOpsDialect>();
context->getOrLoadDialect<infrt::InfrtDialect>();
context->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
context->getOrLoadDialect<infrt::dt::DTDialect>();
context->getOrLoadDialect<mlir::pd::PaddleDialect>();
context->getOrLoadDialect<infrt::phi::PHIDenseTensorDialect>();
context->getOrLoadDialect<infrt::phi::PHICPUKernelDialect>();
context->getOrLoadDialect<infrt::phi::PHIGPUKernelDialect>();
context->getOrLoadDialect<infrt::phi::PHIDialect>();
context->loadAllAvailableDialects();
mlir::PassManager pm(context);
mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
if (mlir::failed(pm.run(module_))) {
std::cout << "\npass failed!\n" << std::endl;
}
module_.dump();
// execute
infrt::host_context::KernelRegistry registry;
infrt::kernel::RegisterBasicKernels(&registry);
infrt::kernel::RegisterTestKernels(&registry);
infrt::kernel::RegisterTensorShapeKernels(&registry);
infrt::kernel::RegisterTensorKernels(&registry);
infrt::kernel::RegisterControlFlowKernels(&registry);
infrt::kernel::RegisterPhiKernels(&registry);
infrt::kernel::RegisterInferShapeLaunchers(&registry);
// load extra shared library
for (const auto& lib_path : cl_shared_libs) {
std::string err;
llvm::sys::DynamicLibrary dynLib =
llvm::sys::DynamicLibrary::getPermanentLibrary(lib_path.c_str(), &err);
if (!dynLib.isValid()) {
llvm::errs() << "Load shared library failed. Error: " << err << "\n";
break;
}
if (auto reg_sym = dynLib.SearchForAddressOfSymbol("RegisterKernels")) {
auto reg_func =
reinterpret_cast<void (*)(infrt::host_context::KernelRegistry*)>(
reg_sym);
reg_func(&registry);
} else {
llvm::outs() << "Symbol \"RegisterKernels\" not found in \"" << lib_path
<< "\". Skip.\n";
}
}
infrt::host_context::TestMlir(module_, &registry);
}
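Note: infrt::host_context::TestMlir (from the included mlir_to_runtime_translate.h) translates the lowered module to the host runtime and executes it against the registered kernels, so this test exercises model conversion, kernel selection, and execution end to end.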
......@@ -44,6 +44,8 @@ function update_pd_ops() {
cd ${PADDLE_ROOT}/tools/infrt/
python3 generate_pd_op_dialect_from_paddle_op_maker.py
python3 generate_phi_kernel_dialect.py
# generate test model
python3 paddle/infrt/tests/model/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
}
function init() {
......
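(The model generated here lands at build/paddle/infrt/tests/abs, matching the relative ./abs.pdmodel and ./abs.pdiparams paths that test_abs.cc opens — presumably the test's working directory at run time.)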