Commit 42febfa9 authored by Luo Tao

tensorrt convert init: add a TensorRTConverter skeleton with an op-converter registry, stub converters for mul and conv2d, and a unit test

Parent f3e4e42d
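For orientation, a minimal usage sketch of the API added by this commit, assuming a fluid ProgramDesc and Scope are already in hand (this mirrors the unit test in convert_test.cc below):

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/tensorrt/convert/convert.h"

// Rough usage sketch, not part of the diff: construct the converter from a
// scope and walk a block, dispatching the registered per-op converters.
void ConvertProgramSketch() {
  paddle::framework::ProgramDesc prog;  // the fluid program to convert
  paddle::framework::Scope scope;       // holds the program's parameters
  paddle::inference::tensorrt::TensorRTConverter converter(scope);
  converter.ConvertBlock(*prog.MutableBlock(0));
}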
nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader)
cc_library(tensorrt DEPS tensorrt_convert)
add_subdirectory(convert)
nv_library(tensorrt_convert SRCS convert.cc DEPS dynload_cuda)
nv_test(tensorrt_convert_test SRCS convert_test.cc DEPS tensorrt paddle_fluid)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/convert.h"
namespace paddle {
namespace inference {
namespace tensorrt {
void TensorRTConverter::ConvertOp(const framework::OpDesc& op) {
std::string type = op.Type();
PADDLE_ENFORCE(op_registry_.count(type), "No converter registered for op: %s",
type);
std::function<void(const framework::OpDesc&)> op_converter =
op_registry_.at(type);
op_converter(op);
}
void TensorRTConverter::ConvertBlock(const framework::BlockDesc& block) {
for (auto op : block.AllOps()) {
ConvertOp(*op);
}
}
void TensorRTConverter::RegisterOpConverters() {
op_registry_["mul"] = ConvertMul;
op_registry_["conv2d"] = ConvertConv2D;
}
void TensorRTConverter::ConvertMul(const framework::OpDesc& op) {
LOG(INFO) << "convert a fluid mul op to tensorrt fc layer without bias";
}
void TensorRTConverter::ConvertConv2D(const framework::OpDesc& op) {
LOG(INFO) << "convert a fluid Conv2d op to tensorrt conv layer without bias";
}
} // namespace tensorrt
} // namespace inference
} // namespace paddle
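Both converters are deliberate stubs at this stage; they only log. As a hypothetical sketch of where ConvertMul is headed, assuming the converter later holds an nvinfer1::INetworkDefinition and the mul weight is already available on the host (none of the names below, apart from the TensorRT API itself, exist in this commit):

#include <NvInfer.h>
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/op_desc.h"

// Hypothetical follow-up sketch, NOT part of this commit: convert a fluid mul
// op (no bias) into a TensorRT fully connected layer. `network`, `tensors`,
// `weight_data`, `weight_count` and `n_out` stand in for state the converter
// does not hold yet.
void ConvertMulSketch(const paddle::framework::OpDesc& op,
                      nvinfer1::INetworkDefinition* network,
                      std::unordered_map<std::string, nvinfer1::ITensor*>* tensors,
                      const float* weight_data, int64_t weight_count, int n_out) {
  // TensorRT tensor previously produced for the op's input variable.
  nvinfer1::ITensor* input = tensors->at(op.Input("X")[0]);
  // TensorRT takes weights as raw host pointers wrapped in nvinfer1::Weights.
  nvinfer1::Weights weight{nvinfer1::DataType::kFLOAT, weight_data, weight_count};
  nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, nullptr, 0};
  // mul without bias maps onto a fully connected layer with an empty bias.
  auto* fc = network->addFullyConnected(*input, n_out, weight, bias);
  // Record the output so downstream ops can find it by fluid variable name.
  (*tensors)[op.Output("Out")[0]] = fc->getOutput(0);
}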
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <NvInfer.h>
#include <functional>
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace inference {
namespace tensorrt {
class TensorRTConverter {
public:
explicit TensorRTConverter(const framework::Scope& scope) : scope_(scope) {
this->RegisterOpConverters();
}
// convert fluid op to tensorrt layer
void ConvertOp(const framework::OpDesc& op);
// convert fluid block to tensorrt network
void ConvertBlock(const framework::BlockDesc& block);
private:
// op converter registry, whose key is the fluid op type, and value is the
// corresponding converter function
std::unordered_map<std::string, std::function<void(const framework::OpDesc&)>>
op_registry_;
// fluid inference scope
const framework::Scope& scope_;
// tensorrt input/output tensors, whose key is the fluid variable name, and
// value is a pointer to the corresponding tensorrt tensor
std::unordered_map<std::string, nvinfer1::ITensor*> tr_tensors_;
// register different op converters
void RegisterOpConverters();
// convert a fluid Mul op to tensorrt fc layer without bias
static void ConvertMul(const framework::OpDesc& op);
// convert a fluid Conv2d op to tensorrt conv layer without bias
static void ConvertConv2D(const framework::OpDesc& op);
};
} // namespace tensorrt
} // namespace inference
} // namespace paddle
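Adding a converter for another op type follows the same pattern as mul and conv2d: declare a static member, implement it, and register it in RegisterOpConverters. A hypothetical relu converter for illustration (relu is not handled by this commit):

// Hypothetical addition, not part of this commit.
// In convert.h, inside class TensorRTConverter:
//   // convert a fluid Relu op to tensorrt activation layer
//   static void ConvertRelu(const framework::OpDesc& op);
// In convert.cc:
void TensorRTConverter::ConvertRelu(const framework::OpDesc& op) {
  LOG(INFO) << "convert a fluid relu op to tensorrt activation layer";
}
void TensorRTConverter::RegisterOpConverters() {
  op_registry_["mul"] = ConvertMul;
  op_registry_["conv2d"] = ConvertConv2D;
  op_registry_["relu"] = ConvertRelu;  // the new entry
}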
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/convert.h"
#include <gtest/gtest.h>
#include "paddle/fluid/framework/program_desc.h"
namespace paddle {
namespace inference {
namespace tensorrt {
TEST(tensorrt, ConvertBlock) {
framework::ProgramDesc prog;
auto* block = prog.MutableBlock(0);
auto* mul_op = block->AppendOp();
mul_op->SetType("mul");
auto* conv2d_op = block->AppendOp();
conv2d_op->SetType("conv2d");
framework::Scope scope;
TensorRTConverter converter(scope);
converter.ConvertBlock(*block);
}
} // namespace tensorrt
} // namespace inference
} // namespace paddle
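The test only exercises op types that have registered converters. A natural hypothetical extension (placed inside the same namespaces) would check that ConvertOp rejects an unregistered op type through its PADDLE_ENFORCE:

// Hypothetical extension, not part of this commit: an op type without a
// registered converter should trip the PADDLE_ENFORCE check in ConvertOp.
TEST(tensorrt, ConvertUnregisteredOp) {
  framework::ProgramDesc prog;
  auto* block = prog.MutableBlock(0);
  auto* op = block->AppendOp();
  op->SetType("relu");  // no converter is registered for "relu" here
  framework::Scope scope;
  TensorRTConverter converter(scope);
  EXPECT_ANY_THROW(converter.ConvertBlock(*block));
}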