prelu_op_plugin.h
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cuda_runtime.h>  // for cudaFree and cudaStream_t

#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"

#include "paddle/fluid/inference/tensorrt/engine.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

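// PReLU plugin for engines built with static (implicit batch) shapes.
// Computes f(x) = x for x > 0 and f(x) = alpha * x otherwise, with alpha
// taken from weight_ according to mode_ (e.g. one alpha shared by all
// elements, or one per channel). weight_ lives on the host; initialize()
// is expected to stage a device copy at p_gpu_weight_.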
class PReluPlugin : public PluginTensorRT {
  std::vector<float> weight_;
  // Initialized to nullptr so the destructor's cudaFree is safe even if
  // initialize() was never called.
  float* p_gpu_weight_{nullptr};
  std::string mode_;

 protected:
  size_t getSerializationSize() override {
    return getBaseSerializationSize() + SerializedSize(mode_.c_str()) +
           SerializedSize(weight_) + SerializedSize(getPluginType());
  }

  // TensorRT calls this function when it needs to serialize the plugin's
  // configuration. It should not be called by users.
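  // The write order below (plugin type, base fields, weights, mode) must
  // stay in sync with getSerializationSize() and with the deserialization
  // side that reads this buffer back.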
  void serialize(void* buffer) override {
    SerializeValue(&buffer, getPluginType());
    serializeBase(buffer);
    SerializeValue(&buffer, weight_);
    SerializeValue(&buffer, mode_.c_str());
  }

 public:
  PReluPlugin(const float* weight, const int weight_num,
              std::string const& mode)
      : mode_(mode) {
    weight_.resize(weight_num);
    std::copy(weight, weight + weight_num, weight_.data());
  }

  // This constructor is used by TensorRT for deserialization.
  // It should not be called by users.
  PReluPlugin(void const* serialData, size_t serialLength) {
    deserializeBase(serialData, serialLength);
    DeserializeValue(&serialData, &serialLength, &weight_);
    const char* prelu_mode;
    DeserializeValue(&serialData, &serialLength, &prelu_mode);
    mode_ = std::string(prelu_mode);
  }
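  // initialize() presumably uploads weight_ into device memory at
  // p_gpu_weight_ (implemented in the companion .cu file); the destructor
  // releases that buffer.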
  ~PReluPlugin() { cudaFree(p_gpu_weight_); }
  int initialize() override;

  PReluPlugin* clone() const override {
    return new PReluPlugin(weight_.data(), weight_.size(), mode_);
  }

  const char* getPluginType() const override { return "prelu_plugin"; }
  int getNbOutputs() const override { return 1; }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
              void* workspace, cudaStream_t stream) override;
};

#if IS_TRT_VERSION_GE(6000)
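// Dynamic-shape (explicit batch) variant of the PReLU plugin; compiled in
// only when building against TensorRT 6.0 or newer.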
class PReluPluginDynamic : public DynamicPluginTensorRT {
 public:
  PReluPluginDynamic(const float* weight, const int weight_num,
                     std::string const& mode)
      : mode_(mode) {
    weight_.resize(weight_num);
    std::copy(weight, weight + weight_num, weight_.data());
  }

  // This constructor is used by TensorRT for deserialization.
  // It should not be called by users.
  PReluPluginDynamic(void const* serialData, size_t serialLength) {
    deserializeBase(serialData, serialLength);
    DeserializeValue(&serialData, &serialLength, &weight_);
    const char* prelu_mode;
    DeserializeValue(&serialData, &serialLength, &prelu_mode);
    mode_ = std::string(prelu_mode);
  }
  ~PReluPluginDynamic() { cudaFree(p_gpu_weight_); }
  nvinfer1::IPluginV2DynamicExt* clone() const override {
    return new PReluPluginDynamic(weight_.data(), weight_.size(), mode_);
  }

  const char* getPluginType() const override { return "prelu_plugin"; }
  int getNbOutputs() const override { return 1; }
  int initialize() override;

  size_t getSerializationSize() const override;
  void serialize(void* buffer) const override;

  nvinfer1::DimsExprs getOutputDimensions(
      int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
      nvinfer1::IExprBuilder& expr_builder) override;

  bool supportsFormatCombination(int pos,
                                 const nvinfer1::PluginTensorDesc* inOut,
                                 int nbInputs, int nbOutputs) override;

  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nbInputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nbOutputs) override {}

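  // PReLU needs no scratch workspace beyond its input and output buffers.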
  size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs,
                          int nbInputs,
                          const nvinfer1::PluginTensorDesc* outputs,
                          int nbOutputs) const override {
    return 0;
  }

  int enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
              const nvinfer1::PluginTensorDesc* outputDesc,
              const void* const* inputs, void* const* outputs, void* workspace,
              cudaStream_t stream) override;
  nvinfer1::DataType getOutputDataType(int index,
                                       const nvinfer1::DataType* inputTypes,
                                       int nbInputs) const override;

  void destroy() override { delete this; }

 private:
  std::vector<float> weight_;
  // nullptr so the destructor's cudaFree is safe before initialize().
  float* p_gpu_weight_{nullptr};
  std::string mode_;
};
#endif

}  // namespace plugin
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle