// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <thrust/device_vector.h>

#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

class SplitPlugin : public PluginTensorRT {
29
 public:
N
nhzlx 已提交
30
  SplitPlugin() {}
31 32 33 34
  SplitPlugin(int axis, std::vector<int> const& output_lengths, bool with_fp16)
      : axis_(axis), same_shape_(true), output_length_(output_lengths) {
    with_fp16_ = with_fp16;
  }
35

36
  SplitPlugin(void const* serial_data, size_t serial_length) {
37 38 39 40 41
    deserializeBase(serial_data, serial_length);
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
  }

42
  SplitPlugin* clone() const override {
43 44 45
    auto* ptr = new SplitPlugin(axis_, output_length_, with_fp16_);
    ptr->shareData(this);
    return ptr;
46 47
  }

48
  const char* getPluginType() const override { return "split_plugin"; }
49 50
  int getNbOutputs() const override { return output_length_.size(); }
  nvinfer1::Dims getOutputDimensions(int index,
51
                                     const nvinfer1::Dims* input_dims,
52 53 54
                                     int num_inputs) override;

  int initialize() override;
55
  void terminate() override;
56 57
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
              void* workspace, cudaStream_t stream) override;
N
nhzlx 已提交
58 59

 protected:
60
  size_t getSerializationSize() override {
N
nhzlx 已提交
61 62
    return SerializedSize(getPluginType()) + SerializedSize(axis_) +
           SerializedSize(output_length_) + getBaseSerializationSize();
N
nhzlx 已提交
63 64
  }

65
  void serialize(void* buffer) override {
N
nhzlx 已提交
66
    SerializeValue(&buffer, getPluginType());
N
nhzlx 已提交
67
    serializeBase(buffer);
N
nhzlx 已提交
68 69
    SerializeValue(&buffer, axis_);
    SerializeValue(&buffer, output_length_);
N
nhzlx 已提交
70 71
  }

72
  int axis_;
H
hjchen2 已提交
73 74
  int outer_rows_;
  int inner_cols_;
75
  int axis_shape_;
H
hjchen2 已提交
76
  bool same_shape_;
77 78
  std::vector<int> output_length_;
  std::vector<int> segment_offsets_;
H
hjchen2 已提交
79
  thrust::device_vector<int> d_segment_offsets_;
80
  thrust::device_vector<float*> d_output_ptrs_;
81 82 83

 private:
  void shareData(const SplitPlugin* another);
N
nhzlx 已提交
84 85
};

#if IS_TRT_VERSION_GE(6000)
class SplitPluginDynamic : public DynamicPluginTensorRT {
 public:
89 90 91 92 93
  SplitPluginDynamic(int axis, std::vector<int> const& output_lengths,
                     bool with_fp16)
      : axis_(axis), output_length_(output_lengths) {
    with_fp16_ = with_fp16;
  }
94

95 96 97 98 99
  SplitPluginDynamic(void const* serial_data, size_t serial_length) {
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
    DeserializeValue(&serial_data, &serial_length, &with_fp16_);
  }
100 101

  nvinfer1::IPluginV2DynamicExt* clone() const override {
102
    return new SplitPluginDynamic(axis_, output_length_, with_fp16_);
103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
  }

  const char* getPluginType() const override { return "split_plugin"; }
  int getNbOutputs() const override { return output_length_.size(); }
  int initialize() override;

  size_t getSerializationSize() const override;
  void serialize(void* buffer) const override;

  nvinfer1::DimsExprs getOutputDimensions(
      int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
      nvinfer1::IExprBuilder& exprBuilder) override;

  bool supportsFormatCombination(int pos,
                                 const nvinfer1::PluginTensorDesc* inOut,
                                 int nbInputs, int nbOutputs) override;

  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nbInputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nbOutputs) override {}

  size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs,
                          int nbInputs,
                          const nvinfer1::PluginTensorDesc* outputs,
                          int nbOutputs) const override {
    return 0;
  }

  int enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
              const nvinfer1::PluginTensorDesc* outputDesc,
              const void* const* inputs, void* const* outputs, void* workspace,
              cudaStream_t stream) override;
  nvinfer1::DataType getOutputDataType(int index,
                                       const nvinfer1::DataType* inputTypes,
                                       int nbInputs) const override;

  void destroy() override { delete this; }

 private:
  int axis_;
  std::vector<int> output_length_;
};

// Registers the dynamic split plugin with TensorRT's plugin registry so
// deserialized engines can re-materialize SplitPluginDynamic by name/version.
class SplitPluginV2Creator : public nvinfer1::IPluginCreator {
 public:
  SplitPluginV2Creator() = default;

  // Identity reported to the registry; must match SplitPluginDynamic.
  const char* getPluginName() const override { return "split_plugin"; }
  const char* getPluginVersion() const override { return "1"; }

  const nvinfer1::PluginFieldCollection* getFieldNames() override {
    // No creation-time attributes are exposed, so the collection is empty.
    return &field_collection_;
  }

  nvinfer1::IPluginV2* createPlugin(
      const char* name, const nvinfer1::PluginFieldCollection* fc) override {
    // Attribute-based creation is unsupported; instances are constructed by
    // the converter and only recreated here via deserializePlugin().
    return nullptr;
  }

  nvinfer1::IPluginV2* deserializePlugin(const char* name,
                                         const void* serial_data,
                                         size_t serial_length) override {
    // Ownership transfers to TensorRT, which releases it via destroy().
    return new SplitPluginDynamic(serial_data, serial_length);
  }

  void setPluginNamespace(const char* lib_namespace) override {
    plugin_namespace_.assign(lib_namespace);
  }

  const char* getPluginNamespace() const override {
    return plugin_namespace_.c_str();
  }

 private:
  std::string plugin_namespace_;
  std::string plugin_name_;
  nvinfer1::PluginFieldCollection field_collection_{0, nullptr};
  std::vector<nvinfer1::PluginField> plugin_attributes_;
};

REGISTER_TRT_PLUGIN_V2(SplitPluginV2Creator);
#endif

}  // namespace plugin
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle