split_op_plugin.h
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cassert>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {

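// A TensorRT plugin that implements Paddle's split op: it slices a single
// input tensor into getNbOutputs() output tensors along axis_.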
class SplitPlugin : public PluginTensorRT {
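  // Axis of the input tensor along which the split is performed.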
  int axis_;
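  // Length of each output tensor along axis_.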
  std::vector<int> output_length_;
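  // Working sizes of the input around the split axis, computed at runtime;
  // they are not part of the serialized state.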
  int nx_, ny_, nz_;
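  // Per-output offsets along axis_, also computed at runtime and not
  // serialized.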
  std::vector<int> segment_offsets_;

 protected:
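  // Number of bytes needed to serialize this plugin: the base plugin state
  // plus axis_ and output_length_.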
  size_t getSerializationSize() override {
    return SerializedSize(axis_) + SerializedSize(output_length_) +
           getBaseSerializationSize();
  }

  // TensorRT calls this function when it needs to serialize the plugin's
  // configuration. It should not be called by users.
  void serialize(void *buffer) override {
    serializeBase(buffer);
    SerializeValue(&buffer, axis_);
    SerializeValue(&buffer, output_length_);
  }

 public:
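  // axis is the dimension to split along; output_lengths holds the size of
  // each output along that axis.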
  SplitPlugin(int axis, std::vector<int> const &output_lengths)
      : axis_(axis), output_length_(output_lengths) {
    assert(axis <= nvinfer1::Dims::MAX_DIMS);
  }

  // This constructor is used by TensorRT for deserialization.
  // It should not be called by users.
  SplitPlugin(void const *serialData, size_t serialLength) {
    deserializeBase(serialData, serialLength);
    DeserializeValue(&serialData, &serialLength, &axis_);
    DeserializeValue(&serialData, &serialLength, &output_length_);
  }

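  // Creates a copy of the plugin with the same axis and output lengths.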
  SplitPlugin *clone() const override {
    return new SplitPlugin(axis_, output_length_);
  }

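  // Remaining TensorRT plugin interface; getOutputDimensions, initialize and
  // enqueue are defined out of line.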
  const char *getPluginType() const override { return "split"; }
  int getNbOutputs() const override { return output_length_.size(); }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims *inputs,
                                     int nbInputDims) override;
  int initialize() override;
  int enqueue(int batchSize, const void *const *inputs, void **outputs,
              void *workspace, cudaStream_t stream) override;
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle