Commit 0007b9e0 authored by Megvii Engine Team

build(third_party): update llvm-project

GitOrigin-RevId: 26bb606606d26e6ad70cad48be2bb1f136445abc
Parent 404ef808
@@ -130,10 +130,9 @@ void add_cpu_lowering_pass(mlir::PassManager& manager) {
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
     }
-    manager.addPass(create_lower_to_affine_pass());
     {
         mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
+        opt_pm.addPass(create_lower_to_affine_pass());
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
         opt_pm.addPass(mlir::createLoopFusionPass());
@@ -150,9 +149,9 @@ void add_cuda_lowering_pass(mlir::PassManager& manager,
         opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
     }
-    manager.addPass(create_lower_to_gpu_pass());
     {
         mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
+        opt_pm.addPass(create_lower_to_gpu_pass());
        opt_pm.addPass(mlir::createCanonicalizerPass());
         opt_pm.addPass(mlir::createCSEPass());
         opt_pm.addPass(mlir::createLoopFusionPass());
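
Note: in the two hunks above, create_lower_to_affine_pass() and create_lower_to_gpu_pass() move from the top-level (module) pass manager onto the nested FuncOp pass manager, presumably because after this llvm-project bump they are function-level passes and the pass manager rejects passes anchored on the wrong op type. A minimal sketch of the nesting idiom, using the mlir::FuncOp spelling of this era (the pipeline-building function name and context setup are assumptions, not code from this repo):

    #include <memory>
    #include <mlir/IR/Function.h>
    #include <mlir/Pass/Pass.h>
    #include <mlir/Pass/PassManager.h>
    #include <mlir/Transforms/Passes.h>

    // Declared in this repo; returns the function-level lowering pass.
    std::unique_ptr<mlir::Pass> create_lower_to_affine_pass();

    void build_cpu_pipeline(mlir::MLIRContext* ctx) {
        mlir::PassManager manager(ctx);  // anchored on the module
        // Function-level passes must be added on a nested FuncOp manager;
        // adding them to `manager` directly would fail pipeline verification.
        mlir::OpPassManager& opt_pm = manager.nest<mlir::FuncOp>();
        opt_pm.addPass(create_lower_to_affine_pass());
        opt_pm.addPass(mlir::createCanonicalizerPass());
        opt_pm.addPass(mlir::createCSEPass());
    }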
@@ -179,9 +178,6 @@ thread_local mlir::MLIRContext MLIRCompiler::sm_ctx;
 MLIRCompiler::MLIRCompiler(CompNode::DeviceType device_type)
         : m_device_type{device_type} {
-    mlir::registerAllDialects();
-    mlir::registerDialect<MgbDialect>();
 #if MGB_CUDA
     if (m_device_type == CompNode::DeviceType::CUDA) {
         LLVMInitializeNVPTXTarget();
...
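
Note: the two removed calls track upstream's retirement of global, static dialect registration in favor of registering dialects on a specific MLIRContext. The replacement call site is not shown in this diff; the sketch below is a hypothetical illustration of the per-context style using the DialectRegistry API that later MLIR revisions expose (MgbDialect is MegEngine's own dialect; the setup function is an assumption):

    #include <mlir/IR/Dialect.h>
    #include <mlir/IR/MLIRContext.h>

    void setup_context(mlir::MLIRContext& ctx) {
        mlir::DialectRegistry registry;
        registry.insert<MgbDialect>();        // register with this context only
        ctx.appendDialectRegistry(registry);
        ctx.loadDialect<MgbDialect>();        // load eagerly before building IR
    }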
@@ -81,7 +81,7 @@ MLIRCPUExecutable::MLIRCPUExecutable(mlir::OwningModuleRef& module,
     auto opt_pipeline = mlir::makeOptimizingTransformer(3, 3, 0);
     std::vector<std::string> libs;
     auto&& engine = mlir::ExecutionEngine::create(
-            *module, opt_pipeline, llvm::None,
+            *module, nullptr, opt_pipeline, llvm::None,
             std::vector<llvm::StringRef>(libs.begin(), libs.end()), true,
             false);
     mgb_assert(engine);
...
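
Note: the inserted nullptr corresponds to an upstream change that added an optional llvmModuleBuilder callback, a hook for customizing the ModuleOp-to-llvm::Module translation, as the parameter before the optimizing transformer in mlir::ExecutionEngine::create; nullptr keeps the default translation. A minimal sketch of the updated call shape, assuming the trailing parameters keep their defaults at this revision:

    #include <mlir/ExecutionEngine/ExecutionEngine.h>
    #include <mlir/ExecutionEngine/OptUtils.h>

    llvm::Expected<std::unique_ptr<mlir::ExecutionEngine>> make_engine(
            mlir::ModuleOp module) {
        // -O3-style pipeline; the third argument is an optional TargetMachine.
        auto opt_pipeline = mlir::makeOptimizingTransformer(3, 3, nullptr);
        // Second argument is the new llvmModuleBuilder hook; nullptr means
        // "use the default MLIR-to-LLVM-IR translation".
        return mlir::ExecutionEngine::create(
                module, /*llvmModuleBuilder=*/nullptr, opt_pipeline);
    }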
@@ -176,7 +176,8 @@ public:
                 AssignOpLowering, ConstantScalarOpLowering>(
                 &getContext());
-        if (failed(applyPartialConversion(getFunction(), target, patterns))) {
+        if (failed(applyPartialConversion(getFunction(), target,
+                                          std::move(patterns)))) {
             signalPassFailure();
         }
     }
...
@@ -279,7 +279,8 @@ public:
                 ConstantScalarOpLowering, AssignOpLowering>(
                 &getContext(), launch_op);
-        if (failed(applyPartialConversion(func_op, target, patterns))) {
+        if (failed(applyPartialConversion(func_op, target,
+                                          std::move(patterns)))) {
             signalPassFailure();
         }
     }
...
@@ -51,7 +51,7 @@ public:
         populateExpandTanhPattern(patterns, &getContext());
         auto module = getOperation();
-        if (failed(applyFullConversion(module, target, patterns)))
+        if (failed(applyFullConversion(module, target, std::move(patterns))))
             signalPassFailure();
     }
 };
...
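
Note: the three std::move(patterns) edits above follow an upstream change where applyPartialConversion and applyFullConversion began taking the rewrite patterns by value (as a frozen pattern list), so callers must move their OwningRewritePatternList in rather than pass an lvalue. A sketch of the idiom inside a FunctionPass of this era, with SomeOpLowering as a hypothetical stand-in for the lowering patterns in this diff:

    #include <mlir/Pass/Pass.h>
    #include <mlir/Transforms/DialectConversion.h>

    class MyLoweringPass : public mlir::PassWrapper<MyLoweringPass,
                                                    mlir::FunctionPass> {
        void runOnFunction() override {
            mlir::ConversionTarget target(getContext());
            mlir::OwningRewritePatternList patterns;
            patterns.insert<SomeOpLowering>(&getContext());  // hypothetical
            // The driver now consumes the pattern list, so move it in;
            // reusing `patterns` after this point would be a bug.
            if (mlir::failed(mlir::applyPartialConversion(
                        getFunction(), target, std::move(patterns)))) {
                signalPassFailure();
            }
        }
    };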
@@ -366,7 +366,6 @@ TYPED_TEST(TestJITMlirUnaryElemwise, runGpu) {
     REQUIRE_GPU(1);
     auto cn = CompNode::load("gpu0");
-    SKIP_MODE(SIN);
     SKIP_MODE(ROUND);
     run_mlir_mode<TypeParam, 1>(cn);
...
-Subproject commit fc031d29bea856f2b91a250fd81c5f9fb79dbe07
+Subproject commit c30ab6c2a307cfdce8323ed94c3d70eb2d26bc14