// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tensorrt/op_teller.h"
#include "paddle/fluid/framework/block_desc.h"
namespace paddle {
namespace framework {
class OpDesc;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace inference {
namespace tensorrt {

// Just tell by the op_types.
struct SimpleOpTypeSetTeller : public Teller {
30 31 32
  SimpleOpTypeSetTeller() {
#if IS_TRT_VERSION_GE(5130)
    teller_set.insert("relu6");
33
    teller_set.insert("hard_sigmoid");
P
Pei Yang 已提交
34
    teller_set.insert("clip");
35 36
    int8_teller_set.insert("relu6");
    int8_teller_set.insert("hard_sigmoid");
P
Pei Yang 已提交
37
    int8_teller_set.insert("clip");
38 39 40 41 42
#endif
#if IS_TRT_VERSION_GE(6000)
    teller_set.insert("fused_embedding_eltwise_layernorm");
    teller_set.insert("multihead_matmul");
    teller_set.insert("skip_layernorm");
43
    teller_set.insert("slice");
44 45 46
#endif
#if IS_TRT_VERSION_GE(7130)
    teller_set.insert("group_norm");
47 48
#endif
  }
49

50 51 52 53 54 55 56
  bool operator()(const std::string& op_type, const framework::OpDesc& desc,
                  bool use_no_calib_int8) override {
    if (use_no_calib_int8) {
      return int8_teller_set.count(op_type);
    } else {
      return teller_set.count(op_type);
    }
57 58 59
  }

 private:
60
  // use this set for no calib int8.
61 62
  std::unordered_set<std::string> int8_teller_set{"mul",
                                                  "conv2d",
63
                                                  "conv2d_fusion",
64 65 66 67
                                                  "pool2d",
                                                  "relu",
                                                  "depthwise_conv2d",
                                                  "softmax",
68
                                                  "sigmoid",
69 70 71 72
                                                  "batch_norm",
                                                  "elementwise_add",
                                                  "leaky_relu",
                                                  "fc",
73 74 75
                                                  "concat",
                                                  "scale",
                                                  "elementwise_mul",
76 77
                                                  "conv2d_transpose",
                                                  "hard_swish"};
78
  std::unordered_set<std::string> teller_set{
79
      "mul",
80
      "matmul",
81
      "conv2d",
82
      "conv2d_fusion",
83 84 85 86
      "pool2d",
      "relu",
      "softmax",
      "sigmoid",
87
      "hard_swish",
88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
      "depthwise_conv2d",
      "batch_norm",
      "concat",
      "tanh",
      "pad",
      "elementwise_add",
      "elementwise_mul",
      "dropout",
      "prelu",
      "conv2d_transpose",
      "leaky_relu",
      "fc",
      "shuffle_channel",
      "swish",
      "split",
      "instance_norm",
      "gelu",
      "layer_norm",
106
      "scale",
107
      "stack",
108 109 110 111
      "transpose2",
      "transpose",
      "flatten2",
      "flatten",
112
  };
113 114
};

// Decide whether `node`'s op can be converted to TensorRT.
// Checks the skip-quant label first, then per-op structural constraints
// (padding layout for conv/pool, input rank for matmul, attributes for
// group_norm/concat/transpose/flatten), and finally consults the
// registered tellers' allow-lists.
bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
                    bool with_dynamic_shape) {
  const std::string op_type = node->Op()->Type();
  const framework::OpDesc desc = *node->Op();
  // do not support the op which is labeled the `skip_quant`
  // BUGFIX: the guard used to check HasAttr("namescope") while fetching
  // "op_namescope"; check and fetch the same attribute so GetAttr is never
  // called on a missing attribute.
  if ((desc.HasAttr("op_namescope") &&
       BOOST_GET_CONST(std::string, desc.GetAttr("op_namescope")) ==
           "/skip_quant_2/") ||
      desc.HasAttr("skip_quant"))
    return false;

  for (auto& teller : tellers_) {
    if (op_type == "pool2d" || op_type == "conv2d" ||
        op_type == "depthwise_conv2d" || op_type == "conv2d_transpose") {
      std::vector<int> paddings =
          BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));

      std::string padding_algorithm = "EXPLICIT";
      if (desc.HasAttr("padding_algorithm"))
        padding_algorithm =
            BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm"));
      // Only 2-value explicit paddings convert cleanly; "SAME" is handled
      // for pool2d only.
      if (paddings.size() > 2 ||
          (padding_algorithm == "SAME" && op_type != "pool2d"))
        return false;
    }
    if (op_type == "matmul") {
      auto* block = desc.Block();
      for (auto& param_name : desc.Inputs()) {
        for (auto& var_name : param_name.second) {
          auto* var_desc = block->FindVar(var_name);
          const auto shape = var_desc->GetShape();
          if (shape.size() < 3) {
            VLOG(1)
                << "matmul op dims < 3 not supported in tensorrt, but got dims "
                << shape.size() << ", so jump it.";
            return false;
          }
        }
      }
    }
    if (op_type == "group_norm") {
      // group_norm is converted via a dynamic-shape plugin.
      if (!with_dynamic_shape) return false;
      bool has_attrs = (desc.HasAttr("epsilon") && desc.HasAttr("groups"));
      if (has_attrs == false) return false;

      auto registry = GetPluginRegistry();
      if (registry == nullptr) return false;
    }
    if (op_type == "concat") {
      if (!desc.HasAttr("axis")) {
        return false;
      } else {
        // axis 0 is the (implicit) batch dimension in static-shape TRT.
        int axis = BOOST_GET_CONST(int, desc.GetAttr("axis"));
        if (axis <= 0) return false;
      }
    }
    if (op_type == "transpose2" || op_type == "transpose") {
      if (!desc.HasAttr("axis")) {
        return false;
      } else {
        std::vector<int> axis =
            BOOST_GET_CONST(std::vector<int>, desc.GetAttr("axis"));
        // ROBUSTNESS: an empty axis attribute would make axis[0] below UB.
        if (axis.empty()) return false;
        // In static-shape mode the batch dimension must stay in place.
        if (!with_dynamic_shape && axis[0] != 0) return false;
        if (axis.size() >= nvinfer1::Dims::MAX_DIMS) return false;
      }
    }
    if (op_type == "flatten2" || op_type == "flatten") {
      // flatten doesn't support dynamic shape currently
      if (!desc.HasAttr("axis")) {
        return false;
      } else {
        if (with_dynamic_shape) return false;
        int axis = BOOST_GET_CONST(int, desc.GetAttr("axis"));
        if (axis != 1) return false;
      }
    }
    if ((*teller)(op_type, desc, use_no_calib_int8)) return true;
  }
  return false;
}

// Install the default type-set teller; `tellers_` takes ownership of the
// new instance (presumably via a smart-pointer element type — confirm in
// op_teller.h).
OpTeller::OpTeller() { tellers_.emplace_back(new SimpleOpTypeSetTeller); }

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle