/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/convert_mask_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {

Y
Yan Chunwei 已提交
22 23 24
/*
 * MulOp, IMatrixMultiplyLayer in TRT. This Layer doesn't has weights.
 */
L
Luo Tao 已提交
25 26
class MulOpConverter : public OpConverter {
 public:
27
  void operator()(const framework::proto::OpDesc& op,
28
                  const framework::Scope& scope, bool test_mode) override {
29
    VLOG(3) << "convert a fluid mul op to tensorrt mul layer without bias";
Y
Yan Chunwei 已提交
30

F
fengjiayi 已提交
31
    framework::OpDesc op_desc(op, nullptr);
Y
Yan Chunwei 已提交
32 33 34
    // Declare inputs
    auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]);
    auto* input2 = engine_->GetITensor(op_desc.Input("Y")[0]);
Z
zlsh80826 已提交
35 36 37 38 39 40 41 42 43 44 45 46 47 48

    bool transpose_x = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_X"));
    bool transpose_y = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_Y"));

#ifdef USE_NVINFER_PLUGIN
    nvinfer1::DataType type = (engine_->WithFp16() == 1)
                                  ? nvinfer1::DataType::kHALF
                                  : nvinfer1::DataType::kFLOAT;
    plugin::ConvertMaskPluginDynamic* plugin =
        new plugin::ConvertMaskPluginDynamic(type);
    auto convert_mask_layer = engine_->AddPluginV2(&input1, 1, plugin);
    engine_->SetITensor("qkv_plugin_mask", convert_mask_layer->getOutput(0));
#endif

Y
Yan Chunwei 已提交
49 50
    // Both the input1 and input2 do not need transpose.
    auto* layer = TRT_ENGINE_ADD_LAYER(
Z
zlsh80826 已提交
51 52
        engine_, MatrixMultiply, *const_cast<nvinfer1::ITensor*>(input1),
        transpose_x, *const_cast<nvinfer1::ITensor*>(input2), transpose_y);
Y
Yan Chunwei 已提交
53

54
    auto output_name = op_desc.Output("Out")[0];
Z
zlsh80826 已提交
55
    RreplenishLayerAndOutput(layer, "matmul", {output_name}, test_mode);
L
Luo Tao 已提交
56 57
  }
};
L
Luo Tao 已提交
58

L
Luo Tao 已提交
59 60 61
}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle
62

Z
zlsh80826 已提交
63
REGISTER_TRT_OP_CONVERTER(matmul, MulOpConverter);