未验证 提交 acdf0663 编写于 作者: S Shang Zhizhou 提交者: GitHub

update pd_2_trt lower pass (#40019)

* update pd_2_trt lower pass

* update pd_2_trt lower pass

* update style

* update

* change trt.graph to trt.create_engine

* update comments

* update comments

* add test
上级 852a872f
...@@ -53,9 +53,9 @@ bool reverseDfs(std::vector<mlir::Operation *> source, ...@@ -53,9 +53,9 @@ bool reverseDfs(std::vector<mlir::Operation *> source,
} }
// merge the first&second graph op to a new graph op. // merge the first&second graph op to a new graph op.
void mergeTwoAdjacentGraphOp(mlir::OpBuilder &builder, // NOLINT void mergeTwoAdjacentCreateEngineOp(mlir::OpBuilder &builder, // NOLINT
mlir::pd::GraphOp first, CreateEngineOp first,
mlir::pd::GraphOp second) { CreateEngineOp second) {
// compute inputs and outputs // compute inputs and outputs
::llvm::SmallVector<mlir::Value, 4> inputs(first.getOperands()), outputs; ::llvm::SmallVector<mlir::Value, 4> inputs(first.getOperands()), outputs;
for (mlir::Value input : second.getOperands()) { for (mlir::Value input : second.getOperands()) {
...@@ -84,7 +84,8 @@ void mergeTwoAdjacentGraphOp(mlir::OpBuilder &builder, // NOLINT ...@@ -84,7 +84,8 @@ void mergeTwoAdjacentGraphOp(mlir::OpBuilder &builder, // NOLINT
// create the new graph op // create the new graph op
builder.setInsertionPoint(first); builder.setInsertionPoint(first);
auto loc = first.getLoc(); auto loc = first.getLoc();
auto graph_op = builder.create<mlir::pd::GraphOp>(loc, return_types, inputs); auto graph_op =
builder.create<CreateEngineOp>(loc, return_types, inputs, true);
mlir::Block *block = new mlir::Block; mlir::Block *block = new mlir::Block;
auto copy_range = second.getBody()->without_terminator(); auto copy_range = second.getBody()->without_terminator();
block->getOperations().splice(block->begin(), block->getOperations().splice(block->begin(),
...@@ -97,7 +98,7 @@ void mergeTwoAdjacentGraphOp(mlir::OpBuilder &builder, // NOLINT ...@@ -97,7 +98,7 @@ void mergeTwoAdjacentGraphOp(mlir::OpBuilder &builder, // NOLINT
copy_range.begin(), copy_range.begin(),
copy_range.end()); copy_range.end());
builder.setInsertionPointToEnd(block); builder.setInsertionPointToEnd(block);
builder.create<mlir::pd::ReturnOp>(loc, outputs); builder.create<::infrt::dialect::ReturnOp>(loc, outputs);
graph_op.body().push_back(block); graph_op.body().push_back(block);
// mapping the output // mapping the output
...@@ -149,13 +150,12 @@ void TRTGraphFusePass::runOnFunction() { ...@@ -149,13 +150,12 @@ void TRTGraphFusePass::runOnFunction() {
do { do {
changed = false; changed = false;
for (auto &op : body) { for (auto &op : body) {
mlir::pd::GraphOp graph_op = CreateEngineOp graph_op = ::llvm::dyn_cast_or_null<CreateEngineOp>(&op);
::llvm::dyn_cast_or_null<mlir::pd::GraphOp>(&op);
if (nullptr == graph_op) continue; if (nullptr == graph_op) continue;
for (auto user_op : op.getUsers()) { for (auto user_op : op.getUsers()) {
mlir::pd::GraphOp user_graph_op = CreateEngineOp user_graph_op =
::llvm::dyn_cast_or_null<mlir::pd::GraphOp>(user_op); ::llvm::dyn_cast_or_null<CreateEngineOp>(user_op);
if (nullptr == user_graph_op) continue; if (nullptr == user_graph_op) continue;
// get all dst input nodes except src. // get all dst input nodes except src.
std::vector<mlir::Operation *> source_nodes; std::vector<mlir::Operation *> source_nodes;
...@@ -168,7 +168,7 @@ void TRTGraphFusePass::runOnFunction() { ...@@ -168,7 +168,7 @@ void TRTGraphFusePass::runOnFunction() {
// Reverse DFS from the source_nodes. // Reverse DFS from the source_nodes.
if (!reverseDfs(source_nodes, if (!reverseDfs(source_nodes,
[&op](const mlir::Operation *n) { return n == &op; })) { [&op](const mlir::Operation *n) { return n == &op; })) {
mergeTwoAdjacentGraphOp(builder, graph_op, user_graph_op); mergeTwoAdjacentCreateEngineOp(builder, graph_op, user_graph_op);
changed = true; changed = true;
break; break;
} }
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/tensorrt/trt_ops.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -26,28 +28,28 @@ namespace trt { ...@@ -26,28 +28,28 @@ namespace trt {
* *
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %c = "pd.graph"(%a) { * %c = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "pd.return" %m * "Infrt.return" %m
* } ... * } ...
* %d = "pd.graph"(%c) { * %d = "trt.create_engine"(%c) {
* %m = "pd.conv3d"(%c)... * %m = "pd.conv3d"(%c)...
* "pd.return" %m * "Infrt.return" %m
* } ... * } ...
* %f = "pd.graph"(%a) { * %f = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "pd.return" %m * "Infrt.return" %m
* } ... * } ...
* "pd.fetch" %d, %f * "pd.fetch" %d, %f
* *
* destination func: * destination func:
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %d, %f = "pd.graph"(%a) { * %d, %f = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "pd.return" %n, %s * "Infrt.return" %n, %s
* } ... * } ...
* "pd.fetch" %d, %f * "pd.fetch" %d, %f
* } * }
...@@ -55,6 +57,9 @@ namespace trt { ...@@ -55,6 +57,9 @@ namespace trt {
class TRTGraphFusePass class TRTGraphFusePass
: public mlir::PassWrapper<TRTGraphFusePass, mlir::FunctionPass> { : public mlir::PassWrapper<TRTGraphFusePass, mlir::FunctionPass> {
public: public:
void getDependentDialects(mlir::DialectRegistry &registry) const override {
registry.insert<TensorRTDialect, ::infrt::dialect::INFRTDialect>();
}
::llvm::StringRef getName() const override { return "trtGraphFusePass"; } ::llvm::StringRef getName() const override { return "trtGraphFusePass"; }
void runOnFunction() override; void runOnFunction() override;
}; };
......
...@@ -22,18 +22,17 @@ namespace infrt { ...@@ -22,18 +22,17 @@ namespace infrt {
namespace trt { namespace trt {
// Implementation of the trtGraphSplitPass. // Implementation of the trtGraphSplitPass.
void TRTGraphSplitPass::runOnFunction() { void TRTGraphSplitPass::runOnFunction() {
std::vector<mlir::pd::GraphOp> worklist; std::vector<CreateEngineOp> worklist;
mlir::Block& block = getFunction().front(); mlir::Block& block = getFunction().front();
for (auto& op : block) { for (auto& op : block) {
mlir::pd::GraphOp graph_op = CreateEngineOp graph_op = ::llvm::dyn_cast_or_null<CreateEngineOp>(&op);
::llvm::dyn_cast_or_null<mlir::pd::GraphOp>(&op);
if (nullptr != graph_op && if (nullptr != graph_op &&
graph_op.getBody()->getOperations().size() <= min_subgraph_size_) { graph_op.getBody()->getOperations().size() <= min_subgraph_size_) {
worklist.push_back(graph_op); worklist.push_back(graph_op);
} }
} }
while (!worklist.empty()) { while (!worklist.empty()) {
mlir::pd::GraphOp graph_op = worklist.back(); CreateEngineOp graph_op = worklist.back();
worklist.pop_back(); worklist.pop_back();
mlir::Block* body = graph_op.getBody(); mlir::Block* body = graph_op.getBody();
auto return_op = body->getTerminator(); auto return_op = body->getTerminator();
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/tensorrt/trt_ops.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -27,11 +29,11 @@ namespace trt { ...@@ -27,11 +29,11 @@ namespace trt {
* *
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %d, %f = "pd.graph"(%a) { * %d, %f = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "pd.return" (%n, %s) * "Infrt.return" (%n, %s)
* } ... * } ...
* "pd.fetch" (%d, %f) * "pd.fetch" (%d, %f)
* } * }
...@@ -49,6 +51,9 @@ class TRTGraphSplitPass ...@@ -49,6 +51,9 @@ class TRTGraphSplitPass
: public mlir::PassWrapper<TRTGraphSplitPass, mlir::FunctionPass> { : public mlir::PassWrapper<TRTGraphSplitPass, mlir::FunctionPass> {
public: public:
::llvm::StringRef getName() const override { return "trtGraphSplitPass"; } ::llvm::StringRef getName() const override { return "trtGraphSplitPass"; }
void getDependentDialects(mlir::DialectRegistry &registry) const override {
registry.insert<TensorRTDialect, ::infrt::dialect::INFRTDialect>();
}
void runOnFunction() override; void runOnFunction() override;
explicit TRTGraphSplitPass(size_t min_subgraph_size = 3) explicit TRTGraphSplitPass(size_t min_subgraph_size = 3)
: min_subgraph_size_(min_subgraph_size) {} : min_subgraph_size_(min_subgraph_size) {}
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h" #include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h"
#include "mlir/IR/Builders.h" #include <mlir/IR/Builders.h>
#include "mlir/Transforms/DialectConversion.h" #include <mlir/Transforms/DialectConversion.h>
#include "paddle/infrt/dialect/infrt_base.h" #include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
...@@ -22,12 +22,10 @@ namespace trt { ...@@ -22,12 +22,10 @@ namespace trt {
#include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc" // NOLINT #include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc" // NOLINT
using namespace mlir;
void TRTOpConverterPass::runOnOperation() { void TRTOpConverterPass::runOnOperation() {
// The first thing to define is the conversion target. This will define the // The first thing to define is the conversion target. This will define the
// final target for this lowering. // final target for this lowering.
ConversionTarget target(getContext()); ::mlir::ConversionTarget target(getContext());
// We define the specific operations, or dialects, that are legal targets for // We define the specific operations, or dialects, that are legal targets for
// this lowering. In our case, we are lowering to TensorRTDialect from // this lowering. In our case, we are lowering to TensorRTDialect from
...@@ -36,13 +34,13 @@ void TRTOpConverterPass::runOnOperation() { ...@@ -36,13 +34,13 @@ void TRTOpConverterPass::runOnOperation() {
// Now that the conversion target has been defined, we just need to provide // Now that the conversion target has been defined, we just need to provide
// the set of patterns that will lower the TensorRT operations. // the set of patterns that will lower the TensorRT operations.
RewritePatternSet patterns(&getContext()); ::mlir::RewritePatternSet patterns(&getContext());
populateWithGenerated(patterns); populateWithGenerated(patterns);
// With the target and rewrite patterns defined, we can now attempt the // With the target and rewrite patterns defined, we can now attempt the
// conversion. The conversion will signal failure if any of our `illegal` // conversion. The conversion will signal failure if any of our `illegal`
// operations were not converted successfully. // operations were not converted successfully.
if (failed( if (::mlir::failed(
applyPartialConversion(getOperation(), target, std::move(patterns)))) applyPartialConversion(getOperation(), target, std::move(patterns))))
signalPassFailure(); signalPassFailure();
} }
......
...@@ -25,11 +25,11 @@ namespace trt { ...@@ -25,11 +25,11 @@ namespace trt {
* source ir: * source ir:
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %d, %f = "pd.graph"(%a) { * %d, %f = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "pd.return" %n, %s * "Infrt.return" %n, %s
* } ... * } ...
* "pd.fetch" %d, %f * "pd.fetch" %d, %f
* } * }
...@@ -37,11 +37,11 @@ namespace trt { ...@@ -37,11 +37,11 @@ namespace trt {
* destination ir: * destination ir:
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %d, %f = "pd.graph"(%a) { * %d, %f = "trt.create_engine"(%a) {
* %m = "trt.Convolution"(%a)... * %m = "trt.Convolution"(%a)...
* %n = "trt.Convolution"(%m)... * %n = "trt.Convolution"(%m)...
* %s = "trt.Convolution"(%a)... * %s = "trt.Convolution"(%a)...
* "pd.return" %n, %s * "Infrt.return" %n, %s
* } ... * } ...
* "pd.fetch" %d, %f * "pd.fetch" %d, %f
* } * }
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h" #include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h"
#include <mlir/IR/Builders.h> #include <mlir/IR/Builders.h>
#include "paddle/infrt/dialect/basic_kernels.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
namespace infrt { namespace infrt {
...@@ -33,16 +34,14 @@ void TRTOpTellerPass::runOnFunction() { ...@@ -33,16 +34,14 @@ void TRTOpTellerPass::runOnFunction() {
auto *op = worklist.back(); auto *op = worklist.back();
worklist.pop_back(); worklist.pop_back();
if (op == nullptr) continue; if (op == nullptr) continue;
auto op1 = ::llvm::dyn_cast_or_null<mlir::pd::FeedOp>(op); if (::llvm::dyn_cast_or_null<mlir::pd::FeedOp>(op)) continue;
if (op1) continue; if (::llvm::dyn_cast_or_null<mlir::pd::FetchOp>(op)) continue;
auto op2 = ::llvm::dyn_cast_or_null<mlir::pd::FetchOp>(op); if (::llvm::dyn_cast_or_null<mlir::pd::GraphOp>(op)) continue;
if (op2) continue; if (::llvm::dyn_cast_or_null<CreateEngineOp>(op)) continue;
auto op3 = ::llvm::dyn_cast_or_null<mlir::pd::GraphOp>(op);
if (op3) continue;
builder.setInsertionPoint(op); builder.setInsertionPoint(op);
auto loc = getFunction().getLoc(); auto loc = getFunction().getLoc();
auto graph_op = builder.create<mlir::pd::GraphOp>( auto graph_op = builder.create<CreateEngineOp>(
loc, op->getResultTypes(), op->getOperands()); loc, op->getResultTypes(), op->getOperands(), true);
::llvm::SmallVector<mlir::Value, 4> tblgen_repl_values; ::llvm::SmallVector<mlir::Value, 4> tblgen_repl_values;
for (auto v : for (auto v :
...@@ -55,7 +54,7 @@ void TRTOpTellerPass::runOnFunction() { ...@@ -55,7 +54,7 @@ void TRTOpTellerPass::runOnFunction() {
graph_op.body().push_back(block); graph_op.body().push_back(block);
op->moveBefore(block, block->begin()); op->moveBefore(block, block->begin());
builder.setInsertionPointToEnd(block); builder.setInsertionPointToEnd(block);
builder.create<mlir::pd::ReturnOp>(loc, op->getResults()); builder.create<::infrt::dialect::ReturnOp>(loc, op->getResults());
} }
} }
} // namespace trt } // namespace trt
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/tensorrt/trt_ops.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -35,17 +37,17 @@ namespace trt { ...@@ -35,17 +37,17 @@ namespace trt {
* destination func: * destination func:
* func @main() -> tensor<?xf32> { * func @main() -> tensor<?xf32> {
* %a = "pd.feed"()... * %a = "pd.feed"()...
* %c = "pd.graph"(%a) { * %c = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "pd.return" (%m) * "Infrt.return" (%m)
* } ... * } ...
* %d = "pd.graph"(%c) { * %d = "trt.create_engine"(%c) {
* %m = "pd.conv3d"(%c)... * %m = "pd.conv3d"(%c)...
* "pd.return" (%m) * "Infrt.return" (%m)
* } ... * } ...
* %f = "pd.graph"(%a) { * %f = "trt.create_engine"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "pd.return" (%m) * "Infrt.return" (%m)
* } ... * } ...
* "pd.fetch" (%d, %f) * "pd.fetch" (%d, %f)
* } * }
...@@ -55,6 +57,9 @@ namespace trt { ...@@ -55,6 +57,9 @@ namespace trt {
class TRTOpTellerPass class TRTOpTellerPass
: public mlir::PassWrapper<TRTOpTellerPass, mlir::FunctionPass> { : public mlir::PassWrapper<TRTOpTellerPass, mlir::FunctionPass> {
public: public:
void getDependentDialects(mlir::DialectRegistry &registry) const override {
registry.insert<TensorRTDialect, ::infrt::dialect::INFRTDialect>();
}
::llvm::StringRef getName() const override { return "trtOpTellerPass"; } ::llvm::StringRef getName() const override { return "trtOpTellerPass"; }
void runOnFunction() override; void runOnFunction() override;
}; };
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <mlir/Interfaces/InferTypeOpInterface.h> #include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h> #include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/basic_kernels.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
......
...@@ -7,25 +7,14 @@ include "mlir/Interfaces/CallInterfaces.td" ...@@ -7,25 +7,14 @@ include "mlir/Interfaces/CallInterfaces.td"
include "mlir/IR/OpBase.td" include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/tensorrt/trt_op_base.td" include "paddle/infrt/dialect/tensorrt/trt_op_base.td"
def TRT_FetchOp : TRT_Op<"fetch", [Terminator]> { def TRT_CreateEngineOp : TRT_Op<"create_engine", [SingleBlockImplicitTerminator<"::infrt::dialect::ReturnOp">]> {
let summary = "TensorRT engine return operation";
let description = [{
The `trt.fetch` operation terminates and returns values for the
`trt.graph` operation.
}];
let arguments = (ins Variadic<TRT_Tensor>:$inputs);
}
def TRT_GraphOp : TRT_Op<"graph", [SingleBlockImplicitTerminator<"FetchOp">]> {
let summary = "trt Graph Op"; let summary = "trt Graph Op";
let description = [{ let description = [{
Describe a tensorrt subgraph. Describe a tensorrt subgraph.
}]; }];
let regions = (region SizedRegion<1>:$body); let regions = (region SizedRegion<1>:$body);
let arguments = (ins Variadic<TRT_Tensor>:$inputs); let arguments = (ins Variadic<TRT_Tensor>:$inputs, DefaultValuedAttr<BoolAttr, "true">:$run_once);
let results = (outs Variadic<TRT_Tensor>:$outputs); let results = (outs Variadic<TRT_Tensor>:$outputs);
} }
def TRT_ActivationOp : TRT_Op<"Activation", [NoSideEffect]> { def TRT_ActivationOp : TRT_Op<"Activation", [NoSideEffect]> {
......
// RUN: trt-exec %s
// CHECK-LABEL: @main // CHECK-LABEL: @main
func @main() -> tensor<?xf32> { func @main() -> tensor<?xf32> {
%bias = "pd.feed"() {name="input0"} : () -> tensor<?xf32> %bias = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
......
...@@ -21,10 +21,11 @@ build_dir = "@CMAKE_BINARY_DIR@" ...@@ -21,10 +21,11 @@ build_dir = "@CMAKE_BINARY_DIR@"
config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/bin") config.llvm_tools_dir = os.path.join(build_dir, "third_party/install/llvm/bin")
config.llvm_tools_dir = os.path.join(build_dir, "/third_party/install/llvm/lib") config.llvm_tools_dir = os.path.join(build_dir, "/third_party/install/llvm/lib")
infrtopt_bin = os.path.join(build_dir, "paddle/infrt/dialect/") infrtopt_bin = os.path.join(build_dir, "paddle/infrt/dialect/")
trtexec_bin = os.path.join(build_dir, "paddle/infrt/dialect/tensorrt/")
infrtexec_bin = os.path.join(build_dir, "paddle/infrt/host_context/") infrtexec_bin = os.path.join(build_dir, "paddle/infrt/host_context/")
llvm_bin = os.path.join(build_dir, "third_party/install/llvm/bin/") llvm_bin = os.path.join(build_dir, "third_party/install/llvm/bin/")
config.environment['PATH'] = os.path.pathsep.join( config.environment['PATH'] = os.path.pathsep.join(
(infrtopt_bin, infrtexec_bin, llvm_bin, config.environment['PATH'])) (infrtopt_bin, infrtexec_bin, trtexec_bin, llvm_bin, config.environment['PATH']))
config.suffixes = ['.mlir'] config.suffixes = ['.mlir']
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册