/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

namespace paddle {
namespace inference {
namespace tensorrt {

// Reorder the elements from istrides to ostrides, borrowed from the TensorRT
// converter in TensorFlow:
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorrt/convert/convert_nodes.cc#L318
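// A worked example with hypothetical values: shape = {2, 3}, a row-major
// input (istrides = {3, 1}) and a column-major output (ostrides = {1, 2})
// turn
//   idata = [a b c d e f]   // viewed as [[a b c], [d e f]]
// into its transpose
//   odata = [a d b e c f]   // viewed as [[a d], [b e], [c f]]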
template <typename T>
void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
              T* odata, nvinfer1::DimsHW ostrides) {
  for (int h = 0; h < shape.h(); ++h) {
    for (int w = 0; w < shape.w(); ++w) {
      odata[h * ostrides.h() + w * ostrides.w()] =
          idata[h * istrides.h() + w * istrides.w()];
    }
  }
}
// The input data is laid out as C * K.
// Reorder the data layout from CK to KC.
void ReorderCKtoKC(TensorRTEngine::Weight& iweights,  // NOLINT
                   TensorRTEngine::Weight* oweights) {
  int c = iweights.dims[0];
  int k = iweights.dims[1];
  oweights->dims.assign({k, c});
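  // Reading the C*K source with istrides = {1, k} and writing the K*C
  // destination with ostrides = {c, 1} makes Reorder2 perform a transpose.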
  nvinfer1::DimsHW istrides = {1, k};
  nvinfer1::DimsHW ostrides = {c, 1};
  Reorder2({k, c}, static_cast<float const*>(iweights.get().values), istrides,
           static_cast<float*>(const_cast<void*>(oweights->get().values)),
           ostrides);
}

/*
 * FC converter converts a MUL op in Fluid to a FC layer in TRT.
 */
class FcOpConverter : public OpConverter {
 public:
  void operator()(const framework::proto::OpDesc& op,
                  const framework::Scope& scope, bool test_mode) override {
    VLOG(4) << "convert a fluid fc op to a tensorrt fc layer without bias";

    framework::OpDesc op_desc(op, nullptr);
    PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
    PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1);  // Y is a weight
    PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);

    // Declare inputs
    auto* X = engine_->GetITensor(op_desc.Input("X").front());

    // Declare weights
    auto* Y_v = scope.FindVar(op_desc.Input("Y").front());
    PADDLE_ENFORCE_NOT_NULL(Y_v);
    auto* Y_t = Y_v->GetMutable<framework::LoDTensor>();
    // This may trigger a GPU->CPU copy: TRT weights can only be assigned
    // from CPU memory, so the copy can't be avoided.
    platform::CPUPlace cpu_place;
    framework::LoDTensor weight_tensor;
    weight_tensor.Resize(Y_t->dims());
    TensorCopySync((*Y_t), cpu_place, &weight_tensor);

    auto* weight_data = weight_tensor.mutable_data<float>(platform::CPUPlace());

    PADDLE_ENFORCE_EQ(weight_tensor.dims().size(), 2UL);  // a matrix
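    // fluid's mul stores the weight as (input_dim, output_dim), so dims()[1]
    // is the number of output neurons of the fc layer.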
    size_t n_output = weight_tensor.dims()[1];

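    // TRT only keeps a pointer to the weight memory, so a host-side copy must
    // stay alive as long as the engine; `tmp` is moved into weight_map below.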
    std::unique_ptr<framework::Tensor> tmp(new framework::LoDTensor());
    tmp->Resize(weight_tensor.dims());

    memcpy(tmp->mutable_data<float>(platform::CPUPlace()), weight_data,
           Y_t->dims()[0] * Y_t->dims()[1] * sizeof(float));
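    // `weight` is just a transient CK-ordered view of the copied data; the
    // reordered buffer that TRT will actually reference is held by `tmp`.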
    TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT,
                                  static_cast<void*>(weight_data),
                                  Y_t->memory_size() / sizeof(float)};
    TensorRTEngine::Weight tmp_weight(nvinfer1::DataType::kFLOAT,
                                      static_cast<void*>(tmp->data<float>()),
                                      Y_t->memory_size() / sizeof(float));
    weight.dims.assign({Y_t->dims()[0], Y_t->dims()[1]});
    tmp_weight.dims = weight.dims;

    // The data layout of the TRT FC layer's weight differs from fluid's FC,
    // so the elements need to be reordered.
    ReorderCKtoKC(weight, &tmp_weight);

    // Currently, the framework can only handle one fluid op -> one TRT layer,
    // but fc fuses `mul` and `bias` (2 fluid ops), so the trick here is to
    // handle only `mul` and leave `add` to another layer.
    TensorRTEngine::Weight bias{nvinfer1::DataType::kFLOAT, nullptr, 0};

    auto* layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected,
                                       *const_cast<nvinfer1::ITensor*>(X),
                                       n_output, tmp_weight.get(), bias.get());

    auto output_name = op_desc.Output("Out").front();
    engine_->SetITensor(output_name, layer->getOutput(0));
    engine_->weight_map[op_desc.Input("Y").front()] = std::move(tmp);
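    // In test mode, declare the output so the test framework can fetch it.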
    if (test_mode) {
      engine_->DeclareOutput(output_name);
    }
  }
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle

REGISTER_TRT_OP_CONVERTER(fc, FcOpConverter);