Unverified commit e91a7896, authored by Zhang Jun, committed by GitHub

[inference] Remove log about fluid and fix uninitialization warning (#51558)

* Remove log about fluid
* Remove useless forward declarations
* Fix uninitialization warning (trt onehot)
Parent e9c3da9e
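The third bullet corresponds to the one_hot converter hunk further down, where the raw nvinfer1::ITensor* locals gain brace initialization. Below is a minimal, self-contained sketch of that pattern only; the Tensor struct and LookupTensor helper are invented stand-ins for illustration, not the real TensorRT or Paddle code.

// Sketch of the uninitialized-pointer fix pattern (assumptions: Tensor and
// LookupTensor are placeholders, not nvinfer1::ITensor or Paddle APIs).
#include <iostream>

struct Tensor { int id; };

Tensor* LookupTensor(bool found) {
  static Tensor t{42};
  return found ? &t : nullptr;  // a lookup that may legitimately fail
}

int main() {
  // Before the fix, the converter declared the locals without an initializer:
  //   nvinfer1::ITensor* values_tensor;
  //   nvinfer1::ITensor* depth_tensor;
  // If no branch assigned them, any later read saw an indeterminate value,
  // which is what the compiler's (maybe-)uninitialized warning flagged.
  //
  // After the fix, the pointers start from a well-defined null state:
  Tensor* values_tensor{nullptr};
  Tensor* depth_tensor{nullptr};

  if (Tensor* t = LookupTensor(true)) values_tensor = t;
  if (Tensor* t = LookupTensor(false)) depth_tensor = t;

  // Null checks are now meaningful on every path.
  std::cout << (values_tensor ? values_tensor->id : -1) << " "
            << (depth_tensor ? depth_tensor->id : -1) << "\n";
  return 0;
}

Brace-initializing the raw pointers is cheap, silences the warning on every path, and does not change the converter's behavior when a branch does assign the tensor.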
......@@ -23,16 +23,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -46,10 +36,9 @@ class ActivationOpConverter : public OpConverter {
// Here the two nullptr looks strange, that's because the
// framework::OpDesc's constructor is strange.
framework::OpDesc op_desc(op, nullptr);
VLOG(3)
<< "convert a fluid Activation op to tensorrt activation layer whose "
"type is "
<< op_type_;
VLOG(3) << "convert a Activation op to tensorrt activation layer whose "
"type is "
<< op_type_;
auto* input_tensor = engine_->GetITensor(op_desc.Input("X")[0]);
auto op_pair = ops.find(op_type_);
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +27,7 @@ class AffineChannelOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid affine_channel op to tensorrt scale nd layer";
VLOG(3) << "convert a affine_channel op to tensorrt scale nd layer";
framework::OpDesc op_desc(op, nullptr);
std::string input_name = op_desc.Input("X").front();
......
......@@ -25,7 +25,7 @@ class AnchorGeneratorOpConverter : public OpConverter {
void operator()(const paddle::framework::proto::OpDesc& op,
const paddle::framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid anchor generator op to tensorrt plugin";
VLOG(3) << "convert a anchor generator op to tensorrt plugin";
framework::OpDesc op_desc(op, nullptr);
std::string input_name = op_desc.Input("Input").front();
std::string anchor_name = op_desc.Output("Anchors").front();
......
......@@ -14,15 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -32,7 +23,7 @@ class ArgMaxOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid arg_max op to tensorrt topk layer";
VLOG(3) << "convert a arg_max op to tensorrt topk layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -14,15 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -32,7 +23,7 @@ class ArgMinOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid arg_min op to tensorrt topk layer";
VLOG(3) << "convert a arg_min op to tensorrt topk layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -17,15 +17,6 @@ limitations under the License. */
namespace nvinfer1 {
class IScaleLayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
......@@ -36,7 +27,7 @@ class BatchNormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid batch norm op to tensorrt batch_norm";
VLOG(3) << "convert a batch norm op to tensorrt batch_norm";
framework::OpDesc op_desc(op, nullptr);
auto* X = engine_->GetITensor(op_desc.Input("X").front());
......
......@@ -15,15 +15,6 @@ limitations under the License. */
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -33,7 +24,7 @@ class BilinearInterpolateV2OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid bilinear_interp_v2 op";
VLOG(3) << "convert a bilinear_interp_v2 op to tensorrt OP";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -30,7 +30,7 @@ class CAllReduceOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid callreduce op to tensorrt layer";
VLOG(4) << "convert callreduce op to tensorrt layer";
if (!engine_->with_dynamic_shape()) {
PADDLE_THROW(
platform::errors::Fatal("Unsupported static graph mode. Please set "
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -14,19 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +23,7 @@ class CeluOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid celu op to tensorrt layer";
VLOG(4) << "convert celu op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +27,7 @@ class ClipOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(5130)
VLOG(3) << "convert a paddle clip op to tensorrt IActivationLayer.";
VLOG(3) << "convert a clip op to tensorrt IActivationLayer.";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class ConcatOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a paddle concat op to tensorrt concat layer";
VLOG(3) << "convert a concat op to tensorrt concat layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -16,16 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/engine.h"
#include "paddle/phi/common/data_type.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -38,7 +28,7 @@ void ConvertConv2d(TensorRTEngine* engine,
RegistFunc fadd_layer,
SetDilationFunc fset_dilation,
const std::string& name) {
VLOG(3) << "convert a fluid " << name << " op to tensorrt layer without bias";
VLOG(3) << "convert a " << name << " op to tensorrt layer without bias";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ void ConvertConv3d(TensorRTEngine* engine,
RegistFunc fadd_layer,
SetDilationFunc fset_dilation,
const std::string& name) {
VLOG(3) << "convert a fluid " << name << " op to tensorrt layer without bias";
VLOG(3) << "convert a " << name << " op to tensorrt layer without bias";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -18,15 +18,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class DropoutOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid dropout op to tensorrt dropout layer";
VLOG(3) << "convert a dropout op to tensorrt dropout layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -25,7 +25,7 @@ class ElementwiseTensorOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "Convert a fluid elementwise op to TensorRT IElementWiseLayer";
VLOG(3) << "Convert a elementwise op to TensorRT IElementWiseLayer";
framework::OpDesc op_desc(op, nullptr);
auto* X = engine_->GetITensor(op_desc.Input("X").front());
nvinfer1::ITensor* Y = nullptr;
......
......@@ -16,15 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -17,15 +17,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h"
#include "paddle/phi/core/ddim.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -35,7 +26,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid EmbEltwiseLayerNorm op to tensorrt layer";
VLOG(4) << "convert EmbEltwiseLayerNorm op to tensorrt layer";
// get the presistable var's data
auto GetWeight = [&](const std::string& var_name,
framework::DDim* dim) -> TensorRTEngine::Weight {
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -35,6 +25,7 @@ class EqualOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert equal op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
nvinfer1::ILayer* layer = nullptr;
......@@ -87,6 +78,7 @@ class NotEqualOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert not_equal op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
nvinfer1::ILayer* layer = nullptr;
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -33,7 +23,7 @@ class ExpandV2OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a paddle expand_v2 op to trt expand layer.";
VLOG(3) << "convert a expand_v2 op to trt expand layer.";
framework::OpDesc op_desc(op, nullptr);
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
auto inputs = op_desc.Inputs();
......
......@@ -11,16 +11,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -132,7 +122,7 @@ class FcOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid fc op to tensorrt fc layer without bias";
VLOG(3) << "convert a fc op to tensorrt fc layer without bias";
framework::OpDesc op_desc(op, nullptr);
auto output_name = op_desc.Output("Out").front();
auto input_names = op_desc.InputNames();
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -24,7 +24,7 @@ class FillConstantBatchSizeLikeOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(7000)
VLOG(4) << "convert a fluid fill_constant_batch_size_like op to tensorrt "
VLOG(4) << "convert a fill_constant_batch_size_like op to tensorrt "
"fill_constant_batch_size_like layer";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -23,8 +23,7 @@ class FillConstantOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4)
<< "convert a fluid fill_constant op to tensorrt fill_constant layer";
VLOG(3) << "convert a fill_constant op to tensorrt fill_constant layer";
framework::OpDesc op_desc(op, nullptr);
int dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype"));
......
......@@ -11,15 +11,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -31,7 +22,7 @@ class FlattenContiguousRangeOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid flatten_contiguous_range op to tensorrt layer";
VLOG(3) << "convert a flatten_contiguous_range op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -11,15 +11,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -16,16 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/utils.h"
#include "paddle/fluid/inference/tensorrt/plugin/lookup_table.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class GatherOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid gather op to tensorrt gather layer";
VLOG(3) << "convert a gather op to tensorrt gather layer";
framework::OpDesc op_desc(op, nullptr);
std::string input_name = op_desc.Input("X").front();
......
......@@ -15,19 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -43,7 +30,7 @@ class GeluOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid gelu op to tensorrt gelu layer";
VLOG(4) << "convert gelu op to tensorrt gelu layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -27,7 +27,7 @@ class GridSamplerOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(8510)
VLOG(3) << "convert a fluid grid_sampler op to tensorrt GridSample layer";
VLOG(3) << "convert a grid_sampler op to tensorrt GridSample layer";
framework::OpDesc op_desc(op, nullptr);
std::string input_x_name = op_desc.Input("X").front();
std::string input_grid_name = op_desc.Input("Grid").front();
......
......@@ -16,15 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -34,7 +25,7 @@ class GroupNormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid group_norm op to tensorrt group_norm plugin";
VLOG(4) << "convert a group_norm op to tensorrt group_norm plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +27,7 @@ class HardSigmoidOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(5130)
VLOG(3) << "convert a fluid HardSigmoid op to tensorrt IActivationLayer "
VLOG(3) << "convert a HardSigmoid op to tensorrt IActivationLayer "
"layer without bias";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class HardSwishOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid HardSwish op to tensorrt HardSwish plugin";
VLOG(4) << "convert HardSwish op to tensorrt HardSwish plugin";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -15,19 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h"
namespace nvinfer1 {
class IPluginLayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -24,7 +24,7 @@ class LayerNormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid layer_norm op to tensorrt layer_norm plugin";
VLOG(4) << "convert a layer_norm op to tensorrt layer_norm plugin";
framework::OpDesc op_desc(op, nullptr);
auto* X = engine_->GetITensor(op_desc.Input("X").front());
......
......@@ -24,7 +24,7 @@ class LayerNormShiftPartitionOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid layernorm_shift_partition op to tensorrt "
VLOG(4) << "convert a layernorm_shift_partition op to tensorrt "
"layernorm_shift_partition plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -34,7 +24,7 @@ class LeakyReluOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid leaky_relu op to tensorrt layer";
VLOG(4) << "convert leaky_relu op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -14,19 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +23,7 @@ class LogSigmoidOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid LogSigmoid op to tensorrt layer";
VLOG(4) << "convert LogSigmoid op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +27,7 @@ class MatMulOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid matmul op to tensorrt matmul layer ";
VLOG(3) << "convert a matmul op to tensorrt matmul layer ";
framework::OpDesc op_desc(op, nullptr);
nvinfer1::ILayer* layer = nullptr;
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/matmul_op_int8_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -23,7 +23,7 @@ class MergeLayernormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid merge_layernorm op to tensorrt merge_layernorm "
VLOG(4) << "convert a merge_layernorm op to tensorrt merge_layernorm "
"plugin";
framework::OpDesc op_desc(op, nullptr);
auto* X = engine_->GetITensor(op_desc.Input("X").front());
......
......@@ -15,29 +15,19 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/mish_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
/*
* Mish converter from fluid to tensorRT.
* Mish OP
*/
class MishOpConverter : public OpConverter {
public:
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid Mish op to tensorrt Mish plugin";
VLOG(4) << "convert mish op to tensorrt mish plugin";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -13,15 +13,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -31,7 +22,7 @@ class MultiClassNMS3OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid multiclassNMS3 op to tensorrt plugin";
VLOG(3) << "convert a multiclassNMS3 op to tensorrt plugin";
// for now, only work for static shape and regular tensor
framework::OpDesc op_desc(op, nullptr);
......
......@@ -13,15 +13,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -31,7 +22,7 @@ class MultiClassNMSOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid multiclassNMS op to tensorrt plugin";
VLOG(3) << "convert a multiclassNMS op to tensorrt plugin";
// for now, only work for static shape and regular tensor
framework::OpDesc op_desc(op, nullptr);
......
......@@ -25,7 +25,7 @@ class MultiheadMatMulOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid multihead_mamul op to a corresponding tensorrt "
VLOG(3) << "convert a multihead_mamul op to a corresponding tensorrt "
"network structure";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -24,7 +24,7 @@ class MultiheadMatMulRoformerOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid multihead_mamul_roformer op to a corresponding "
VLOG(3) << "convert a multihead_mamul_roformer op to a corresponding "
"tensorrt "
"network structure";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -21,7 +21,7 @@ class NearestInterpolateOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid nearest_interp op";
VLOG(3) << "convert a nearest_interp op to tensorrt op";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -12,15 +12,6 @@ limitations under the License. */
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -30,7 +21,7 @@ class NearestInterpolateV2OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid nearest_interp_v2 op";
VLOG(3) << "convert a nearest_interp_v2 op to tensorrt op";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -41,8 +41,8 @@ class OneHotOpConverter : public OpConverter {
framework::OpDesc op_desc(op, nullptr);
const auto indices_tensor = engine_->GetITensor(op_desc.Input("X").front());
nvinfer1::ITensor* values_tensor;
nvinfer1::ITensor* depth_tensor;
nvinfer1::ITensor* values_tensor{nullptr};
nvinfer1::ITensor* depth_tensor{nullptr};
const int dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype"));
if (dtype == 2 || dtype == 3) { // int, int64
const std::vector<int> values_data = {0, 1};
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class PadOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid transpose op to tensorrt tranpose layer";
VLOG(3) << "convert a transpose op to tensorrt tranpose layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -67,8 +57,7 @@ class Pool2dOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc &op,
const framework::Scope &scope,
bool test_mode) override {
VLOG(4)
<< "convert a fluid pool2d op to tensorrt pool2d layer without bias";
VLOG(4) << "convert a pool2d op to tensorrt pool2d layer without bias";
framework::OpDesc op_desc(op, nullptr);
auto *input1 = engine_->GetITensor(op_desc.Input("X")[0]);
nvinfer1::Dims input_shape = input1->getDimensions();
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/pool3d_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -76,8 +66,7 @@ class Pool3dOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc &op,
const framework::Scope &scope,
bool test_mode) override {
VLOG(4)
<< "convert a fluid pool3d op to tensorrt pool3d layer without bias";
VLOG(3) << "convert a pool3d op to tensorrt pool3d layer without bias";
framework::OpDesc op_desc(op, nullptr);
auto *input1 = engine_->GetITensor(op_desc.Input("X")[0]);
nvinfer1::Dims input_shape = input1->getDimensions();
......
......@@ -14,15 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -33,7 +24,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(7000)
VLOG(4) << "convert fluid PrelnEmbEltwiseLayerNorm op to tensorrt layer";
VLOG(4) << "convert PrelnEmbEltwiseLayerNorm op to tensorrt layer";
// get the presistable var's data
auto GetWeight = [&](const std::string& var_name,
framework::DDim* dim) -> TensorRTEngine::Weight {
......
......@@ -16,15 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -34,7 +25,7 @@ class PrelnGroupnormActOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid preln_groupnorm_act op to tensorrt "
VLOG(4) << "convert a preln_groupnorm_act op to tensorrt "
"preln_groupnorm_act plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -24,7 +24,7 @@ class PrelnLayerNormShiftPartitionOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid preln_layernorm_shift_partition op to tensorrt "
VLOG(4) << "convert a preln_layernorm_shift_partition op to tensorrt "
"preln_layernorm_shift_partition plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -20,14 +20,14 @@ namespace inference {
namespace tensorrt {
/*
* PRelu converter from fluid to tensorRT.
* PRelu converter from paddle to tensorRT.
*/
class PReluOpConverter : public OpConverter {
public:
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid prelu op to tensorrt prelu layer";
VLOG(4) << "convert prelu op to tensorrt prelu layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/recover_padding_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -21,16 +21,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/remove_padding_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -11,15 +11,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -32,6 +23,7 @@ class ReshapeOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a paddle reshape op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/reverse_roll_op_plugin.h"
namespace paddle {
......
......@@ -24,7 +24,7 @@ class RnnNativeOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(7000)
VLOG(4) << "convert a fluid rnn op to tensorrt rnn layer";
VLOG(4) << "convert a rnn op to tensorrt rnn layer";
framework::OpDesc op_desc(op, nullptr);
// [seq_len, batch ,in_size],
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +27,7 @@ class RoiAlignOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid roi align op to tensorrt plugin";
VLOG(3) << "convert a roi_align op to tensorrt plugin";
framework::OpDesc op_desc(op, nullptr);
std::string input_name = op_desc.Input("X").front();
......
......@@ -15,15 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -35,7 +26,7 @@ class RollOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid Roll op to tensorrt Slice layer";
VLOG(4) << "convert roll op to tensorrt Slice layer";
framework::OpDesc op_desc(op, nullptr);
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class ScaleOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid scale op to tensorrt mul layer without bias";
VLOG(3) << "convert a scale op to tensorrt mul layer without bias";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -23,7 +23,7 @@ class ShapeOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid shape op to tensorrt shape layer";
VLOG(3) << "convert a shape op to tensorrt shape layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,6 +26,7 @@ class ShuffleChannelOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a shuffle_channel op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -14,19 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +23,7 @@ class SiluOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid silu op to tensorrt layer";
VLOG(4) << "convert silu op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -16,15 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -34,7 +25,7 @@ class SkipGroupnormActOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid skip_groupnorm_act op to tensorrt "
VLOG(4) << "convert a skip_groupnorm_act op to tensorrt "
"skip_groupnorm_act plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -23,7 +23,7 @@ class SkipMergeLayernormOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid skip_merge_layernorm op to tensorrt "
VLOG(4) << "convert a skip_merge_layernorm op to tensorrt "
"skip_merge_layernorm "
"plugin";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -16,15 +16,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,8 +28,7 @@ class SoftMaxOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3)
<< "convert a fluid softmax op to tensorrt softmax layer without bias";
VLOG(3) << "convert a softmax op to tensorrt softmax layer without bias";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/spmm_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -39,7 +39,7 @@ class SparseMultiheadMatMulOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid sparse_multihead_matmul op to a corresponding "
VLOG(3) << "convert a sparse_multihead_matmul op to a corresponding "
"tensorrt "
"network structure";
framework::OpDesc op_desc(op, nullptr);
......
......@@ -24,7 +24,7 @@ class SplitOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid split op to tensorrt split layer";
VLOG(4) << "convert a split op to tensorrt split layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -25,7 +25,7 @@ class SquareOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
framework::OpDesc op_desc(op, nullptr);
VLOG(3) << "convert a fluid sqaure op to tensorrt layer ";
VLOG(3) << "convert a sqaure op to tensorrt layer ";
nvinfer1::ITensor* input_tensor =
engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -23,7 +23,7 @@ class Squeeze2OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid squeeze2 op to tensorrt shuffle layer";
VLOG(4) << "convert a squeeze2 op to tensorrt shuffle layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -15,15 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/stack_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +27,7 @@ class StackOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid stack op to tensorrt stack layer";
VLOG(4) << "convert stack op to tensorrt stack layer";
framework::OpDesc op_desc(op, nullptr);
auto input = op_desc.Input("X");
......
......@@ -23,7 +23,7 @@ class SumOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid sum op to tensorrt sum layer";
VLOG(4) << "convert a sum op to tensorrt sum layer";
framework::OpDesc op_desc(op, nullptr);
nvinfer1::ILayer* layer = nullptr;
......
......@@ -15,19 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -37,7 +24,7 @@ class SwishOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert fluid swish op to tensorrt layer";
VLOG(4) << "convert swish op to tensorrt layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -14,19 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace nvinfer1 {
class ILayer;
} // namespace nvinfer1
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -24,7 +24,7 @@ class TileOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
#if IS_TRT_VERSION_GE(7000)
VLOG(4) << "convert a tile op to tensorrt tile layer";
VLOG(3) << "convert a tile op to tensorrt tile layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -23,16 +23,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -43,6 +33,7 @@ class TopKOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a top_k op to tensorrt TopK layer";
// Here the two nullptr looks strange, that's because the
// framework::OpDesc's constructor is strange.
framework::OpDesc op_desc(op, nullptr);
......
......@@ -8,6 +8,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/trans_layernorm_op_plugin.h"
namespace paddle {
......
......@@ -15,16 +15,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/transformer_input_output_convert_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......
......@@ -11,15 +11,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -32,6 +23,7 @@ class TransposeOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a transpose op to tensorrt shuffle layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
......
......@@ -23,16 +23,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -46,7 +36,7 @@ class UnaryOpConverter : public OpConverter {
// Here the two nullptr looks strange, that's because the
// framework::OpDesc's constructor is strange.
framework::OpDesc op_desc(op, nullptr);
VLOG(3) << "convert a fluid unary op to tensorrt unary layer whose "
VLOG(3) << "convert a unary op to tensorrt unary layer whose "
"type is "
<< op_type_;
nvinfer1::ITensor* input_tensor =
......
......@@ -23,7 +23,7 @@ class Unsqueeze2OpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(4) << "convert a fluid unsqueeze2 op to tensorrt shuffle layer";
VLOG(4) << "convert a unsqueeze2 op to tensorrt shuffle layer";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
......
......@@ -14,16 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -36,7 +26,7 @@ class WhereOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid where op to tensorrt where layer";
VLOG(3) << "convert a where op to tensorrt where layer";
framework::OpDesc op_desc(op, nullptr);
std::string input_x_name = op_desc.Input("X").front();
......
......@@ -14,15 +14,6 @@ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h"
namespace paddle {
namespace framework {
class Scope;
namespace proto {
class OpDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace paddle {
namespace inference {
namespace tensorrt {
......@@ -32,7 +23,7 @@ class YoloBoxOpConverter : public OpConverter {
void operator()(const framework::proto::OpDesc& op,
const framework::Scope& scope,
bool test_mode) override {
VLOG(3) << "convert a fluid yolo box op to tensorrt plugin";
VLOG(3) << "convert a yolo box op to tensorrt plugin";
framework::OpDesc op_desc(op, nullptr);
std::string X = op_desc.Input("X").front();
......