// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <thrust/device_vector.h>

#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

class SplitPlugin : public PluginTensorRT {
29
 public:
N
nhzlx 已提交
30
  SplitPlugin() {}
31 32 33 34
  SplitPlugin(int axis, std::vector<int> const& output_lengths, bool with_fp16)
      : axis_(axis), same_shape_(true), output_length_(output_lengths) {
    with_fp16_ = with_fp16;
  }
35

36
  SplitPlugin(void const* serial_data, size_t serial_length) {
37 38 39 40 41
    deserializeBase(serial_data, serial_length);
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
  }

42
  SplitPlugin* clone() const override {
43
    return new SplitPlugin(axis_, output_length_, with_fp16_);
44 45
  }

46
  const char* getPluginType() const override { return "split_plugin"; }
47 48
  int getNbOutputs() const override { return output_length_.size(); }
  nvinfer1::Dims getOutputDimensions(int index,
49
                                     const nvinfer1::Dims* input_dims,
50 51 52
                                     int num_inputs) override;

  int initialize() override;
53 54
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
              void* workspace, cudaStream_t stream) override;
N
nhzlx 已提交
55 56

 protected:
57
  size_t getSerializationSize() override {
N
nhzlx 已提交
58 59
    return SerializedSize(getPluginType()) + SerializedSize(axis_) +
           SerializedSize(output_length_) + getBaseSerializationSize();
N
nhzlx 已提交
60 61
  }

62
  void serialize(void* buffer) override {
N
nhzlx 已提交
63
    SerializeValue(&buffer, getPluginType());
N
nhzlx 已提交
64
    serializeBase(buffer);
N
nhzlx 已提交
65 66
    SerializeValue(&buffer, axis_);
    SerializeValue(&buffer, output_length_);
N
nhzlx 已提交
67 68
  }

69
  int axis_;
H
hjchen2 已提交
70 71
  int outer_rows_;
  int inner_cols_;
72
  int axis_shape_;
H
hjchen2 已提交
73
  bool same_shape_;
74 75
  std::vector<int> output_length_;
  std::vector<int> segment_offsets_;
H
hjchen2 已提交
76
  thrust::device_vector<int> d_segment_offsets_;
77
  thrust::device_vector<float*> d_output_ptrs_;
N
nhzlx 已提交
78 79
};

80 81 82
#if IS_TRT_VERSION_GE(6000)
class SplitPluginDynamic : public DynamicPluginTensorRT {
 public:
83 84 85 86 87
  SplitPluginDynamic(int axis, std::vector<int> const& output_lengths,
                     bool with_fp16)
      : axis_(axis), output_length_(output_lengths) {
    with_fp16_ = with_fp16;
  }
88

89 90 91 92 93
  SplitPluginDynamic(void const* serial_data, size_t serial_length) {
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
    DeserializeValue(&serial_data, &serial_length, &with_fp16_);
  }
94 95

  nvinfer1::IPluginV2DynamicExt* clone() const override {
96
    return new SplitPluginDynamic(axis_, output_length_, with_fp16_);
97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
  }

  const char* getPluginType() const override { return "split_plugin"; }
  int getNbOutputs() const override { return output_length_.size(); }
  int initialize() override;

  size_t getSerializationSize() const override;
  void serialize(void* buffer) const override;

  nvinfer1::DimsExprs getOutputDimensions(
      int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
      nvinfer1::IExprBuilder& exprBuilder) override;

  bool supportsFormatCombination(int pos,
                                 const nvinfer1::PluginTensorDesc* inOut,
                                 int nbInputs, int nbOutputs) override;

  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nbInputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nbOutputs) override {}

  size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs,
                          int nbInputs,
                          const nvinfer1::PluginTensorDesc* outputs,
                          int nbOutputs) const override {
    return 0;
  }

  int enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
              const nvinfer1::PluginTensorDesc* outputDesc,
              const void* const* inputs, void* const* outputs, void* workspace,
              cudaStream_t stream) override;
  nvinfer1::DataType getOutputDataType(int index,
                                       const nvinfer1::DataType* inputTypes,
                                       int nbInputs) const override;

  void destroy() override { delete this; }

 private:
  int axis_;
  std::vector<int> output_length_;
};
140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179

// Creator registered under "split_plugin" so that deserialized engines can
// re-create SplitPluginDynamic instances.
class SplitPluginV2Creator : public nvinfer1::IPluginCreator {
 public:
  SplitPluginV2Creator() {}

  const char* getPluginName() const override { return "split_plugin"; }
  const char* getPluginVersion() const override { return "1"; }

  const nvinfer1::PluginFieldCollection* getFieldNames() override {
    // No build-time plugin attributes are exposed.
    return &field_collection_;
  }

  // Building from a field collection is unsupported: plugins are constructed
  // directly by the converter and only re-created here via deserialization.
  nvinfer1::IPluginV2* createPlugin(
      const char* name, const nvinfer1::PluginFieldCollection* fc) override {
    return nullptr;
  }

  nvinfer1::IPluginV2* deserializePlugin(const char* name,
                                         const void* serial_data,
                                         size_t serial_length) override {
    return new SplitPluginDynamic(serial_data, serial_length);
  }

  void setPluginNamespace(const char* lib_namespace) override {
    plugin_namespace_ = lib_namespace;
  }
  const char* getPluginNamespace() const override {
    return plugin_namespace_.c_str();
  }

 private:
  std::string plugin_namespace_;
  std::string plugin_name_;
  nvinfer1::PluginFieldCollection field_collection_{0, nullptr};
  std::vector<nvinfer1::PluginField> plugin_attributes_;
};

REGISTER_TRT_PLUGIN_V2(SplitPluginV2Creator);
#endif

}  // namespace plugin
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle