未验证 提交 ec510bfd 编写于 作者: 王明冬 提交者: GitHub

[Infrt] add result check for some infrt op. (#41167)

上级 d006c7ff
...@@ -238,8 +238,8 @@ int InfRtPredictor::Init(const InfRtConfig& config) { ...@@ -238,8 +238,8 @@ int InfRtPredictor::Init(const InfRtConfig& config) {
std::vector<::infrt::Place> valid_places = {{::infrt::TargetType::CPU, std::vector<::infrt::Place> valid_places = {{::infrt::TargetType::CPU,
::infrt::PrecisionType::FLOAT32, ::infrt::PrecisionType::FLOAT32,
::infrt::LayoutType::NCHW}}; ::infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(::infrt::createPhiOpCvtPass(valid_places)); phi_pass_manager.addPass(CreatePhiOpCvtPass(valid_places));
phi_pass_manager.addPass(::infrt::createInfrtOpFusePass()); phi_pass_manager.addPass(CreateInfrtOpFusePass());
if (mlir::failed(pm.run(module_op))) { if (mlir::failed(pm.run(module_op))) {
std::cout << "\npass failed!\n" << std::endl; std::cout << "\npass failed!\n" << std::endl;
return 4; return 4;
......
...@@ -53,7 +53,7 @@ void InfrtOpFusePass::runOnFunction() { ...@@ -53,7 +53,7 @@ void InfrtOpFusePass::runOnFunction() {
} // namespace } // namespace
std::unique_ptr<mlir::Pass> infrt::createInfrtOpFusePass() { std::unique_ptr<mlir::Pass> infrt::CreateInfrtOpFusePass() {
return std::make_unique<InfrtOpFusePass>(); return std::make_unique<InfrtOpFusePass>();
} }
......
...@@ -19,6 +19,6 @@ namespace infrt { ...@@ -19,6 +19,6 @@ namespace infrt {
/* /*
* infrtOpFusePass. * infrtOpFusePass.
*/ */
std::unique_ptr<mlir::Pass> createInfrtOpFusePass(); std::unique_ptr<mlir::Pass> CreateInfrtOpFusePass();
} // namespace infrt } // namespace infrt
...@@ -5,10 +5,6 @@ endif() ...@@ -5,10 +5,6 @@ endif()
add_subdirectory(ir) add_subdirectory(ir)
add_subdirectory(pass) add_subdirectory(pass)
add_executable(phi-ir-exec phi_ir_exec.cc)
target_link_libraries(phi-ir-exec infrt)
add_executable(phi-exec phi_exec.cc) add_executable(phi-exec phi_exec.cc)
target_link_libraries(phi-exec infrt) target_link_libraries(phi-exec infrt)
......
...@@ -261,11 +261,11 @@ void PhiOpConvertPass::getDependentDialects( ...@@ -261,11 +261,11 @@ void PhiOpConvertPass::getDependentDialects(
mlir::PassRegistration<PhiOpConvertPass> phi_op_convert; mlir::PassRegistration<PhiOpConvertPass> phi_op_convert;
std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass( std::unique_ptr<mlir::Pass> infrt::CreatePhiOpCvtPass(
std::vector<Place> valid_places) { std::vector<Place> valid_places) {
return std::make_unique<PhiOpConvertPass>(valid_places); return std::make_unique<PhiOpConvertPass>(valid_places);
} }
std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass() { std::unique_ptr<mlir::Pass> infrt::CreatePhiOpCvtPass() {
return std::make_unique<PhiOpConvertPass>(); return std::make_unique<PhiOpConvertPass>();
} }
...@@ -21,8 +21,8 @@ namespace infrt { ...@@ -21,8 +21,8 @@ namespace infrt {
* phiOpCvtPass. * phiOpCvtPass.
* Convert the general operators from pd Dialect to phi dialect. * Convert the general operators from pd Dialect to phi dialect.
*/ */
std::unique_ptr<mlir::Pass> createPhiOpCvtPass(std::vector<Place> valid_places); std::unique_ptr<mlir::Pass> CreatePhiOpCvtPass(std::vector<Place> valid_places);
std::unique_ptr<mlir::Pass> createPhiOpCvtPass(); std::unique_ptr<mlir::Pass> CreatePhiOpCvtPass();
} // namespace infrt } // namespace infrt
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
#include <iostream>
#include <string>
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h"
// Standalone driver: loads an MLIR module from a file, runs the phi
// op-conversion and infrt op-fusion passes over every function, and dumps
// the module before and after.
//
// Exit codes: 0 on success, 1 if the input module fails to load,
// 4 if the pass pipeline fails (kept for compatibility with callers
// that already test for 4).
int main(int argc, char** argv) {
  static llvm::cl::opt<std::string> input_file(
      llvm::cl::Positional,
      llvm::cl::desc("Specify input filename"),
      llvm::cl::init("-"));
  llvm::cl::ParseCommandLineOptions(argc, argv);

  mlir::MLIRContext* context = infrt::Global::getMLIRContext();
  auto module = infrt::dialect::LoadMlirFile(input_file.c_str(), context);
  // Guard against a parse/load failure before dereferencing the module.
  if (!module) {
    std::cout << "\nfailed to load mlir file: " << input_file << "\n"
              << std::endl;
    return 1;
  }
  context->loadAllAvailableDialects();
  module->dump();

  mlir::PassManager pm(context);
  // The conversion/fusion passes are function passes, so nest them under
  // the builtin FuncOp.
  mlir::OpPassManager& phi_pass_manager = pm.nest<mlir::FuncOp>();
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  // NOTE: use the renamed Create* factories for consistency with the rest
  // of this change (createPhiOpCvtPass -> CreatePhiOpCvtPass, etc.).
  phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
  phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
  if (mlir::failed(pm.run(*module))) {
    std::cout << "\npass failed!\n" << std::endl;
    return 4;
  }
  module->dump();
  return 0;
}
...@@ -98,8 +98,8 @@ int main(int argc, char** argv) { ...@@ -98,8 +98,8 @@ int main(int argc, char** argv) {
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU, std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32, infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}}; infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places)); phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
#endif #endif
if (mlir::failed(pm.run(*module))) { if (mlir::failed(pm.run(*module))) {
......
// RUN: infrtexec -i %s | FileCheck %s
module {
  func @main() {
    %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
    %0 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 2.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1, 3, 6, 6]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %1 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 2.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1, 3, 3, 3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %2 = "pd.conv2d"(%0, %1) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [3 : i32, 3 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // CHECK: dense_tensor: shape=shape[1, 1, 5, 5], value=[0,0,0,0,0,0,48,72,72,24,0,72,108,108,36,0,72,108,108,36,0,24,36,36,12]
    phi_dt.print_tensor (%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %3 = "pd.relu"(%2) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // relu of an all-nonnegative input is the identity, so %3 equals %2.
    // CHECK: dense_tensor: shape=shape[1, 1, 5, 5], value=[0,0,0,0,0,0,48,72,72,24,0,72,108,108,36,0,72,108,108,36,0,24,36,36,12]
    phi_dt.print_tensor (%3 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %4 = "pd.pool2d"(%2) {adaptive = false, ceil_mode = false, data_format = "NCHW", exclusive = true, global_pooling = false, ksize = [2 : i32, 2 : i32], padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], pooling_type = "avg", strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // CHECK: dense_tensor: shape=shape[1, 1, 3, 3], value=[0,0,0,0,75,60,0,60,48]
    phi_dt.print_tensor (%4 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %5 = "pd.flatten_contiguous_range"(%4) {start_axis = 1 : si32, stop_axis = 3 : si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // CHECK: dense_tensor: shape=shape[1, 9], value=[0,0,0,0,75,60,0,60,48]
    phi_dt.print_tensor (%5 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %6 = "pd.elementwise_add"(%5, %5) {axis = 1 : si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // CHECK: dense_tensor: shape=shape[1, 9], value=[0,0,0,0,150,120,0,120,96]
    phi_dt.print_tensor (%6 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %7 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 4.0 : f32, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[9, 3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %8 = "pd.matmul_v2"(%5, %7) {trans_x = false, trans_y = false} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
    // CHECK: dense_tensor: shape=shape[1, 3], value=[972,972,972]
    phi_dt.print_tensor (%8 : !infrt.dense_tensor<CPU, FP32, NCHW>)
    %scale = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=1.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %bias = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=1.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %mean = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=2.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %var = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=0.0:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%1, %scale, %bias, %mean, %var) {data_layout = "NCHW", epsilon = 0.01 : f32, momentum = 0.5 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
    // CHECK: dense_tensor: shape=shape[1, 3, 3, 3], value=[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8]
    phi_dt.print_tensor (%Y : !infrt.dense_tensor<CPU, FP32, NCHW>)
    infrt.return
  }
}
...@@ -86,8 +86,8 @@ TEST(ABS_MODEL, convert_and_execute) { ...@@ -86,8 +86,8 @@ TEST(ABS_MODEL, convert_and_execute) {
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU, std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32, infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}}; infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places)); phi_pass_manager.addPass(infrt::CreatePhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); phi_pass_manager.addPass(infrt::CreateInfrtOpFusePass());
if (mlir::failed(pm.run(module_))) { if (mlir::failed(pm.run(module_))) {
std::cout << "\npass failed!\n" << std::endl; std::cout << "\npass failed!\n" << std::endl;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册