// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <thrust/device_vector.h>
#include <utility>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

class SplitPlugin : public PluginTensorRT {
28
 public:
N
nhzlx 已提交
29
  SplitPlugin() {}
30
  SplitPlugin(int axis, std::vector<int> const& output_lengths)
H
hjchen2 已提交
31
      : axis_(axis), same_shape_(true), output_length_(output_lengths) {}
32

33
  SplitPlugin(void const* serial_data, size_t serial_length) {
34 35 36 37 38
    deserializeBase(serial_data, serial_length);
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
  }

39
  SplitPlugin* clone() const override {
40 41 42
    return new SplitPlugin(axis_, output_length_);
  }

43
  const char* getPluginType() const override { return "split_plugin"; }
44 45
  int getNbOutputs() const override { return output_length_.size(); }
  nvinfer1::Dims getOutputDimensions(int index,
46
                                     const nvinfer1::Dims* input_dims,
47 48 49
                                     int num_inputs) override;

  int initialize() override;
50 51
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
              void* workspace, cudaStream_t stream) override;
N
nhzlx 已提交
52 53

 protected:
54
  size_t getSerializationSize() override {
N
nhzlx 已提交
55 56
    return SerializedSize(getPluginType()) + SerializedSize(axis_) +
           SerializedSize(output_length_) + getBaseSerializationSize();
N
nhzlx 已提交
57 58
  }

59
  void serialize(void* buffer) override {
N
nhzlx 已提交
60
    SerializeValue(&buffer, getPluginType());
N
nhzlx 已提交
61
    serializeBase(buffer);
N
nhzlx 已提交
62 63
    SerializeValue(&buffer, axis_);
    SerializeValue(&buffer, output_length_);
N
nhzlx 已提交
64 65
  }

66
  int axis_;
H
hjchen2 已提交
67 68
  int outer_rows_;
  int inner_cols_;
69
  int axis_shape_;
H
hjchen2 已提交
70
  bool same_shape_;
71 72
  std::vector<int> output_length_;
  std::vector<int> segment_offsets_;
H
hjchen2 已提交
73
  thrust::device_vector<int> d_segment_offsets_;
74
  thrust::device_vector<float*> d_output_ptrs_;
N
nhzlx 已提交
75 76
};

#if IS_TRT_VERSION_GE(6000)
// Dynamic-shape (explicit-batch, IPluginV2DynamicExt) variant of the split
// plugin, available since TensorRT 6.
class SplitPluginDynamic : public DynamicPluginTensorRT {
 public:
  SplitPluginDynamic(int axis, std::vector<int> const& output_lengths)
      : axis_(axis), output_length_(output_lengths) {}

  // Deserialization constructor. FIX: the original body was empty, so a
  // plugin restored from a serialized engine had an indeterminate axis_ and
  // an empty output_length_ (making getNbOutputs() return 0). Restore both
  // fields in declaration order, mirroring SplitPlugin. serialize() is
  // defined in the .cu file — confirm it writes axis_ then output_length_.
  SplitPluginDynamic(void const* serial_data, size_t serial_length) {
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &output_length_);
  }

  nvinfer1::IPluginV2DynamicExt* clone() const override {
    return new SplitPluginDynamic(axis_, output_length_);
  }

  const char* getPluginType() const override { return "split_plugin"; }
  int getNbOutputs() const override { return output_length_.size(); }
  int initialize() override;

  size_t getSerializationSize() const override;
  void serialize(void* buffer) const override;

  nvinfer1::DimsExprs getOutputDimensions(
      int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
      nvinfer1::IExprBuilder& exprBuilder) override;

  bool supportsFormatCombination(int pos,
                                 const nvinfer1::PluginTensorDesc* inOut,
                                 int nbInputs, int nbOutputs) override;

  // No per-shape state to pre-compute; splitting is done entirely in
  // enqueue().
  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nbInputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nbOutputs) override {}

  // The kernel needs no scratch memory.
  size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs,
                          int nbInputs,
                          const nvinfer1::PluginTensorDesc* outputs,
                          int nbOutputs) const override {
    return 0;
  }

  int enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
              const nvinfer1::PluginTensorDesc* outputDesc,
              const void* const* inputs, void* const* outputs, void* workspace,
              cudaStream_t stream) override;
  nvinfer1::DataType getOutputDataType(int index,
                                       const nvinfer1::DataType* inputTypes,
                                       int nbInputs) const override;

  void destroy() override { delete this; }

 private:
  int axis_;                        // dimension to split along
  std::vector<int> output_length_;  // per-output extent along axis_
};
#endif

}  // namespace plugin
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle