Unverified commit 8aba826f, authored by 王明冬, committed by GitHub

[infrt] add phi ir test to infrt-exec. test=develop (#40384)

Parent f3962530
......@@ -24,6 +24,7 @@
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/host_context/core_runtime.h"
#include "paddle/infrt/host_context/kernel_registry.h"
......@@ -41,7 +42,6 @@
using namespace infrt::host_context; // NOLINT
using namespace infrt::tensor; // NOLINT
using namespace infrt::tensor; // NOLINT
using infrt::dt::TensorMapType; // NOLINT
namespace infrt {
......@@ -129,7 +129,7 @@ class PredictExecutor : public MlirToRuntimeTranslator {
auto arg = predict_func.getArgument(i);
auto type = arg.getType();
// this param is TensorMap
if (type.isa<TensorMapType>()) {
if (type.isa<infrt::DenseTensorMapType>()) {
auto* value = new host_context::Value(std::move(*map));
arguments_.push_back(value);
AddValue(predict_func.getArgument(i), value);
......
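The hunk above is the consumer-side change: PredictExecutor now tests function arguments against the ODS-generated infrt::DenseTensorMapType instead of the old hand-written TensorMapType. A minimal sketch of that argument-dispatch loop, assuming an mlir::FuncOp named predict_func as in the surrounding code (everything else is illustrative):

// Sketch: route each entry-block argument by its MLIR type.
for (size_t i = 0; i < predict_func.getNumArguments(); ++i) {
  mlir::Value arg = predict_func.getArgument(i);
  if (arg.getType().isa<infrt::DenseTensorMapType>()) {
    // this argument is the loaded parameter map
  } else {
    // ordinary tensor input supplied by the caller
  }
}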
......@@ -111,25 +111,13 @@ def PrintI64Op : PrintOp<"i64", I64>;
def PrintF32Op : PrintOp<"f32", F32>;
def PrintF64Op : PrintOp<"f64", F64>;
def GetStringOp : INFRT_Op<"get_string"> {
let summary = "Infrt.get_string";
let description = [{
Get a !infrt.string value from the given string attribute.
}];
let arguments = (ins StrAttr:$value);
let results = (outs StringType);
let assemblyFormat = "`(` $value `)` attr-dict";
let verifier = ?;
}
def PrintStringOp : INFRT_Op<"print_string"> {
let summary = "Infrt.print_string";
let description = [{
An operation that prints a string.
}];
let arguments = (ins StringType:$input);
let arguments = (ins StrAttr:$input);
let results = (outs);
let assemblyFormat = "`(` $input `)` attr-dict";
let verifier = ?;
......
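With $input switched from a StringType operand to a StrAttr, print_string no longer consumes an SSA value; the string reaches the host kernel as an op attribute. A minimal sketch of such a kernel, assuming infrt's host_context Attribute<T> wrapper seen elsewhere in this commit (the kernel name is illustrative):

#include <iostream>
#include <string>

// Sketch: the string arrives as an op attribute, not an operand.
static void PrintString(infrt::host_context::Attribute<std::string> input) {
  std::cout << input.get() << std::endl;
}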
......@@ -38,23 +38,6 @@ void DTDialect::initialize() {
#include "paddle/infrt/dialect/dense_tensor.cpp.inc"
>();
}
TensorMapType TensorMapType::get() {
return Base::get(::infrt::Global::getMLIRContext());
}
TensorMapType TensorMapType::get(mlir::MLIRContext *context) {
return Base::get(context);
}
StringType StringType::get() {
return Base::get(::infrt::Global::getMLIRContext());
}
StringType StringType::get(mlir::MLIRContext *context) {
return Base::get(context);
}
static mlir::Type getTensorType(mlir::MLIRContext *context) {
auto t_dialect = mlir::Identifier::get("t", context);
return mlir::OpaqueType::get(t_dialect, "tensor");
......
......@@ -21,27 +21,6 @@
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
namespace infrt {
namespace dt {
class TensorMapType : public mlir::Type::TypeBase<TensorMapType,
mlir::Type,
mlir::TypeStorage> {
public:
using Base::Base;
static TensorMapType get();
static TensorMapType get(mlir::MLIRContext *context);
};
class StringType
: public mlir::Type::TypeBase<StringType, mlir::Type, mlir::TypeStorage> {
public:
using Base::Base;
static StringType get();
static StringType get(mlir::MLIRContext *context);
};
} // namespace dt
} // namespace infrt
#include "paddle/infrt/dialect/dense_tensor_dialect.hpp.inc"
#define GET_OP_CLASSES
......
......@@ -105,11 +105,10 @@ def LoadParamsOp : DT_Op<"load_params", [NoSideEffect]> {
}];
// input path of model params.
let arguments = (ins StringType:$path);
let results = (outs TensorMapType);
let arguments = (ins StrAttr:$path);
let results = (outs DenseTensorMap:$out);
let assemblyFormat = "`(` operands `)` attr-dict";
let verifier = ?;
let assemblyFormat = "`(``)`attr-dict";
}
......@@ -122,7 +121,7 @@ def TensorMapGetTensorOp : DT_Op<"tensor_map_get_tensor", [NoSideEffect]> {
// the tensor map and the name of the tensor to fetch.
let arguments = (ins
TensorMapType:$map,
DenseTensorMap:$map,
StrAttr:$name
);
let results = (outs DenseTensor:$output);
......@@ -137,7 +136,7 @@ def TensorMapGetSizeOp : DT_Op<"tensor_map_get_size", [NoSideEffect]> {
An operation that gets the size of a TensorMap.
}];
let arguments = (ins TensorMapType:$map);
let arguments = (ins DenseTensorMap:$map);
let results = (outs I32:$size);
let assemblyFormat = "`(` $map `)` attr-dict `->` type($size)";
}
......
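After this hunk, dt.load_params takes no operands at all (note the empty `(``)` assembly format); the model path travels as a string attribute on the op. A hedged sketch of building the op from C++, assuming the conventional ODS-generated builder signature (it may differ from what tblgen actually emits here):

// Sketch: constructing the attribute-based op with an OpBuilder.
auto map_ty = infrt::DenseTensorMapType::get(builder.getContext());
builder.create<infrt::dt::LoadParamsOp>(
    loc, map_ty, builder.getStringAttr("@CMAKE_BINARY_DIR@/multi_fc_model"));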
......@@ -30,8 +30,7 @@ def Infrt_ReturnOp : Infrt_Op<"return", [Terminator]> {
let arguments = (ins Variadic<AnyType>:$operands);
let builders = [OpBuilder<(ins),
[{ build($_builder, $_state, llvm::None); }]>];
let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?";
}
def Infrt_CvtTensorOp : Infrt_Op<"cvt_tensor", [NoSideEffect]> {
......
......@@ -83,6 +83,12 @@ def DenseTensor : Infrt_Type<"DenseTensor"> {
);
}
def DenseTensorMap : Infrt_Type<"DenseTensorMap"> {
let summary = "infrt dense tensor map";
let description = [{dense_tensor map}];
let parameters = (ins);
}
// Type Constrait for concrete DenseTensor type.
class DenseTensor<string target, string precision, string layout> :
Type<CPred<"$_self == ::infrt::DenseTensorType::get($_self.getContext(), ::infrt::TargetType::"#target#",::infrt::PrecisionType::"#precision#",::infrt::LayoutType::"#layout#")">,
......
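This ODS definition replaces the hand-written TensorMapType class removed above; mlir-tblgen now emits the C++ type. A minimal usage sketch of the generated API, matching the infrt::DenseTensorMapType::get(context_) call that appears later in this commit:

#include <cassert>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"

// Sketch: the tblgen-generated type exposes the usual MLIR hooks.
// Assumes the infrt dialect has already been loaded on this context.
void DemoDenseTensorMapType(mlir::MLIRContext *context) {
  mlir::Type map_ty = infrt::DenseTensorMapType::get(context);
  assert(map_ty.isa<infrt::DenseTensorMapType>());
}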
......@@ -25,10 +25,6 @@ namespace dialect {
void INFRTDialect::initialize() {
allowUnknownTypes();
allowUnknownOperations();
addTypes<infrt::dt::StringType>();
addTypes<infrt::dt::TensorMapType>();
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/basic_kernels.cpp.inc"
......@@ -43,14 +39,6 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
llvm::StringRef keyword;
if (parser.parseKeyword(&keyword)) return mlir::Type();
// parse TensorMapType, for example: !infrt.tensor_map
if (keyword == "tensor_map") {
return infrt::dt::TensorMapType::get();
}
// parse StringType, for example: !infrt.string
if (keyword == "string") {
return infrt::dt::StringType::get();
}
parser.emitError(parser.getCurrentLocation(), "unknown infrt type: ")
<< keyword;
return mlir::Type();
......@@ -59,15 +47,6 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
void INFRTDialect::printType(mlir::Type type,
mlir::DialectAsmPrinter &printer) const {
// print TensorMapType, for example: !infrt.tensor_map
if (type.isa<infrt::dt::TensorMapType>()) {
printer << "tensor_map";
return;
}
// print StringType, for example: !infrt.string
if (type.isa<infrt::dt::StringType>()) {
printer << "string";
return;
}
llvm_unreachable("unknown infrt type.");
}
......
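The hand-written parse/print branches for !infrt.tensor_map and !infrt.string are deleted because those types no longer live in this dialect. For ODS-defined types, the usual MLIR pattern is to delegate to the tblgen-emitted helpers; a sketch of that pattern, assuming the conventional generatedTypePrinter hook (not shown in this diff):

// Sketch: fall back to the tblgen-generated printer before giving up.
void INFRTDialect::printType(mlir::Type type,
                             mlir::DialectAsmPrinter &printer) const {
  if (mlir::succeeded(generatedTypePrinter(type, printer))) return;
  llvm_unreachable("unknown infrt type.");
}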
......@@ -14,15 +14,6 @@ def INFRT_Dialect : Dialect {
let cppNamespace = "::infrt::dialect";
}
// Type definitions
def StringType :
Type<CPred<"$_self.isa<::infrt::dt::StringType>()">, "!infrt.string type">,
BuildableType<"$_builder.getType<::infrt::dt::StringType>()">;
def TensorMapType :
Type<CPred<"$_self.isa<::infrt::dt::TensorMapType>()">, "!infrt.tensor_map type">,
BuildableType<"$_builder.getType<::infrt::dt::TensorMapType>()">;
def BufferType : OpaqueType<"b", "buffer", "buffer">;
class INFRT_createI32Attr<string value> : NativeCodeCall<
......
......@@ -63,6 +63,7 @@ mlir::OwningModuleRef LoadMlirFile(const std::string& file_name,
mlir::DialectRegistry registry;
registerCinnDialects(registry);
context->appendDialectRegistry(registry);
context->loadAllAvailableDialects();
mlir::ScopedDiagnosticHandler scope_handler(
context, [](mlir::Diagnostic& diag) {
if (diag.getSeverity() != mlir::DiagnosticSeverity::Error)
......
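context->loadAllAvailableDialects() is the functional change here: appendDialectRegistry only makes dialects available on the context, while loading actually instantiates them so the parsed module can use their types and ops. A condensed sketch of the full sequence from this function:

// Sketch: register, append, then load, before parsing any IR.
mlir::DialectRegistry registry;
registerCinnDialects(registry);            // dialect registration
context->appendDialectRegistry(registry);  // available on this context
context->loadAllAvailableDialects();       // eagerly loaded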
......@@ -7,7 +7,7 @@ include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/tensorrt/trt_ops.td"
def PD2TRT_Matmul_Lower : Pat<
(PD_MatmulOp $X, $Y, $transpose_X, $transpose_Y, ConstantAttr<F32Attr, "1.0">, ConstantAttr<SI32Attr, "1">),
(PD_MatmulOp $X, $Y, $transpose_X, $transpose_Y, ConstantAttr<F32Attr, "1.0">),
(TRT_MatrixMultiplyOp $X, $transpose_X, $Y, $transpose_Y)>;
// TODO(shangzhizhou): replace 'INFRT_createI32Attr<"0">' with enum nvinfer1::ElementWiseOperation::kSUM
......
......@@ -13,7 +13,7 @@
// limitations under the License.
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
......@@ -29,6 +29,8 @@
#include "paddle/infrt/kernel/tensor_shape_kernels.h"
#include "paddle/infrt/kernel/test_kernels.h"
#ifdef INFRT_WITH_PHI
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h"
#include "paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.h"
#include "paddle/infrt/kernel/phi/registry.h"
#endif
......@@ -81,6 +83,24 @@ int main(int argc, char** argv) {
}
}
context->loadAllAvailableDialects();
mlir::PassManager pm(context);
#ifdef INFRT_WITH_PHI
mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
#endif
if (mlir::failed(pm.run(*module))) {
std::cout << "\npass failed!\n" << std::endl;
return 4;
}
host_context::TestMlir(module.get(), &registry);
std::cout << std::endl;
......
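pm.nest<mlir::FuncOp>() returns an OpPassManager whose passes run once per function in the module, which is what the phi conversion and op-fusion passes expect. A stripped-down sketch of the same guarded pipeline (pass names come from the hunk; everything else is illustrative):

// Sketch: module-level pass manager with a nested per-function pipeline.
mlir::PassManager pm(context);
#ifdef INFRT_WITH_PHI
mlir::OpPassManager &fpm = pm.nest<mlir::FuncOp>();
fpm.addPass(infrt::createInfrtOpFusePass());  // runs on each func
#endif
if (mlir::failed(pm.run(*module))) return 4;  // same exit code as above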
......@@ -79,7 +79,7 @@ mlir::FuncOp MLIRModelGenImpl::UpdateModelModule(
llvm::SmallVector<mlir::Type, 4> MLIRModelGenImpl::GetModelInputsType(
const infrt::paddle::framework_proto::ProgramDesc &program) {
llvm::SmallVector<mlir::Type, 4> operandTypes;
operandTypes.push_back(infrt::dt::TensorMapType::get(context_));
operandTypes.push_back(infrt::DenseTensorMapType::get(context_));
for (auto &op_desc : main_block_.ops()) {
if (op_desc.type() != "feed") continue;
for (int var_idx = 0; var_idx < op_desc.outputs_size(); ++var_idx) {
......
......@@ -59,6 +59,10 @@ void CopyTo(const Value& from, Value* to) {
to->data = reinterpret_cast<std::vector<int64_t> const&>(arg);
else if (std::is_same<T, tensor::TensorMap>::value)
to->data = reinterpret_cast<tensor::TensorMap const&>(arg);
#ifdef INFRT_WITH_PHI
else if (std::is_same<T, ::phi::DenseTensor>::value)
to->data = reinterpret_cast<::phi::DenseTensor const&>(arg);
#endif
else
LOG(FATAL) << "Not supported Value copy: " << typeid(T).name();
},
......
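The std::is_same chain above runs inside a generic visitor, so every branch must compile for every T; the reinterpret_cast is what keeps the non-matching branches type-checking. A hedged alternative sketch using C++17 if constexpr, which drops the casts (illustrative, not the committed code):

#include <type_traits>
#include <typeinfo>

// Sketch: compile-time dispatch; only the matching branch is instantiated.
template <typename T>
void CopyOne(const T &arg, Value *to) {
#ifdef INFRT_WITH_PHI
  if constexpr (std::is_same_v<T, ::phi::DenseTensor>) {
    to->data = arg;
    return;
  }
#endif
  LOG(FATAL) << "Not supported Value copy: " << typeid(T).name();
}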
......@@ -49,8 +49,8 @@ void FillTensorWithConstant(Attribute<T> v, DenseHostTensor *tensor) {
MutableDTArrayView<T>(tensor).Fill(v.get());
}
TensorMap LoadParams(const std::string &path) {
return *(infrt::tensor::LoadParams(path));
TensorMap LoadParams(Attribute<std::string> path) {
return *(infrt::tensor::LoadParams(path.get()));
}
DenseHostTensor TensorMapGetTensor(TensorMap map, Attribute<std::string> name) {
......
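The kernel now takes the path as Attribute<std::string> to match the StrAttr on dt.load_params. A sketch of how such a kernel is typically wired up, assuming infrt's kernel-registry macro (the function name is illustrative; only "dt.load_params" comes from the op definition):

#include "paddle/infrt/host_context/kernel_registry.h"

// Sketch: attribute-taking kernels register the same way as before.
void RegisterTensorKernels(infrt::host_context::KernelRegistry *registry) {
  registry->AddKernel("dt.load_params", INFRT_KERNEL(LoadParams));
}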
......@@ -19,9 +19,8 @@ func @main() {
%input = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
%path = Infrt.get_string("/Infrt/build/paddle/paddle_1.8_fc_model")
// CHECK-LABEL: loading params
%map = dt.load_params(%path)
%map = dt.load_params() {path="/Infrt/build/paddle/paddle_1.8_fc_model"}
%out = Infrt.call @predict(%input, %map): (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>)
dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>)
......
// RUN: infrtexec -i %s
module {
func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
%2 = "pd.abs"(%arg0) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return %2 : !infrt.dense_tensor<CPU, FP32, NCHW>
}
func @main() {
%ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%t = "phi_dt.create_dense_tensor" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
"phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
%2 = Infrt.call@predict(%t) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return
}
}
// RUN: infrtexec -i %s | FileCheck %s
func @load_tensor_map() {
%path = Infrt.get_string("@CMAKE_BINARY_DIR@/multi_fc_model")
%map = dt.load_params(%path)
%map = dt.load_params(){path="@CMAKE_BINARY_DIR@/multi_fc_model"}
%size = dt.tensor_map_get_size(%map) -> i32
Infrt.print.i32 %size
......
......@@ -32,7 +32,7 @@ function update_pd_ops() {
# compile and install paddle
rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build
cd ${PADDLE_ROOT}/build
cmake .. -DWITH_PYTHON=ON -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF
cmake .. -DWITH_PYTHON=ON -DWITH_MKL=OFF -DWITH_GPU=OFF -DPYTHON_EXECUTABLE=`which python3` -DWITH_XBYAK=OFF -DWITH_NCCL=OFF -DWITH_RCCL=OFF -DWITH_CRYPTO=OFF
make -j8 paddle_python print_pten_kernels kernel_signature_generator
cd ${PADDLE_ROOT}/build
./paddle/phi/tools/print_pten_kernels > ../tools/infrt/kernels.json
......