/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

class ElementWisePlugin : public PluginTensorRT {
 public:
28 29
  ElementWisePlugin(std::string type, nvinfer1::Dims const& dims_x,
                    nvinfer1::Dims const& dims_y, int axis)
30 31 32 33 34 35 36 37
      : type_(type),
        dims_x_(dims_x),
        dims_y_(dims_y),
        axis_(axis),
        prev_size_(1),
        midd_size_(1),
        post_size_(1) {}

38
  ElementWisePlugin(void const* serial_data, size_t serial_length) {
39
    deserializeBase(serial_data, serial_length);
40
    const char* elementwise_type;
N
nhzlx 已提交
41 42
    DeserializeValue(&serial_data, &serial_length, &elementwise_type);
    type_ = std::string(elementwise_type);
43 44 45 46 47
    DeserializeValue(&serial_data, &serial_length, &axis_);
    DeserializeValue(&serial_data, &serial_length, &dims_x_);
    DeserializeValue(&serial_data, &serial_length, &dims_y_);
  }

48
  ElementWisePlugin* clone() const override {
49 50 51 52
    // return new ElementWisePlugin(dims_x_, dims_y_, axis_);
    return nullptr;
  }

53
  const char* getPluginType() const override { return "elementwise_plugin"; }
54 55

  nvinfer1::Dims getOutputDimensions(int index,
56
                                     const nvinfer1::Dims* input_dims,
57 58 59 60 61
                                     int num_inputs) override;

  int initialize() override;

  // execute the layer
62 63
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
              void* workspace, cudaStream_t stream);
64 65 66

 protected:
  size_t getSerializationSize() override {
N
nhzlx 已提交
67 68 69
    return SerializedSize(getPluginType()) + SerializedSize(axis_) +
           SerializedSize(dims_x_) + SerializedSize(dims_y_) +
           getBaseSerializationSize();
70 71
  }

72
  void serialize(void* buffer) override {
N
nhzlx 已提交
73
    SerializeValue(&buffer, getPluginType());
74
    serializeBase(buffer);
N
nhzlx 已提交
75
    SerializeValue(&buffer, type_.c_str());
76 77 78 79 80
    SerializeValue(&buffer, axis_);
    SerializeValue(&buffer, dims_x_);
    SerializeValue(&buffer, dims_y_);
  }

N
nhzlx 已提交
81
  std::string type_;
82 83 84 85 86 87 88 89
  nvinfer1::Dims dims_x_;
  nvinfer1::Dims dims_y_;
  int axis_;
  int prev_size_;
  int midd_size_;
  int post_size_;
};

#if IS_TRT_VERSION_GE(6000)
class ElementwisePluginDynamic : public DynamicPluginTensorRT {
 public:
  explicit ElementwisePluginDynamic(const std::string& type, int axis)
      : type_(type), axis_(axis) {}
  ElementwisePluginDynamic(void const* serialData, size_t serialLength) {}
  nvinfer1::IPluginV2DynamicExt* clone() const override {
    return new ElementwisePluginDynamic(type_, axis_);
  }

  const char* getPluginType() const override { return "elementwise_plugin"; }
  int getNbOutputs() const override { return 1; }
  int initialize() override;

  size_t getSerializationSize() const override;
  void serialize(void* buffer) const override;

  nvinfer1::DimsExprs getOutputDimensions(
      int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
      nvinfer1::IExprBuilder& expr_builder) override;

  bool supportsFormatCombination(int pos,
                                 const nvinfer1::PluginTensorDesc* inOut,
                                 int nbInputs, int nbOutputs) override;

  void configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in,
                       int nbInputs,
                       const nvinfer1::DynamicPluginTensorDesc* out,
                       int nbOutputs) override {}

  size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs,
                          int nbInputs,
                          const nvinfer1::PluginTensorDesc* outputs,
                          int nbOutputs) const override {
    return 0;
  }

  int enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
              const nvinfer1::PluginTensorDesc* outputDesc,
              const void* const* inputs, void* const* outputs, void* workspace,
              cudaStream_t stream) override;
  nvinfer1::DataType getOutputDataType(int index,
                                       const nvinfer1::DataType* inputTypes,
                                       int nbInputs) const override;

  void destroy() override { delete this; }

 private:
  std::string type_;
  int axis_;
};
#endif

}  // namespace plugin
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle