// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/anakin/convert/fc.h"
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/inference/anakin/convert/helper.h"

namespace paddle {
namespace inference {
namespace anakin {

template <typename TargetT, ::anakin::Precision PrecisionT>
void FcBaseOpConverter<TargetT, PrecisionT>::operator()(
    const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc,
    const framework::Scope &scope, bool test_mode) {
  framework::OpDesc op_desc(op, nullptr);
  auto input_names = op_desc.InputNames();
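  // `fc` takes {Input, W, Bias} while the bias-free `mul` takes {X, Y},
  // so a third input slot implies a bias term.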
  bool with_bias = input_names.size() >= 3;

  std::string w_name = "Y";
  std::string i_name = "X";
  if (with_bias) {
    w_name = "W";
    i_name = "Input";
  }

  auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front();

  // get weights
  auto *y_v = scope.FindVar(op_desc.Input(w_name).front());
  PADDLE_ENFORCE_NOT_NULL(y_v);
  auto weight_tensor = tensor_from_var(*y_v, platform::CPUPlace());
  auto weight_shape = framework::vectorize2int(weight_tensor->dims());

  int out_dim = weight_shape[1];
  const int w_m = weight_shape[0];
  const int w_k = weight_shape[1];

  auto input_name = op_desc.Input(i_name).front();
  auto output_name = op_desc.Output("Out").front();

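  // Map the Paddle mul/fc op onto Anakin's "Dense" layer.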
  this->engine_->AddOp(op_name, "Dense", {input_name}, {output_name});
  this->engine_->AddOpAttr(op_name, "bias_term", with_bias);
  this->engine_->AddOpAttr(op_name, "axis", 1);
  this->engine_->AddOpAttr(op_name, "out_dim", out_dim);

  auto *weight_data = weight_tensor->data<float>();
  PADDLE_ENFORCE(w_m * w_k == weight_tensor->numel());

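  // Transpose the row-major [w_m, w_k] weight matrix into column-major
  // order, the layout consumed by Anakin's Dense op.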
  std::vector<float> trans_weight_data(weight_tensor->numel());
  for (int i = 0; i < w_m; i++) {
    for (int j = 0; j < w_k; j++) {
      trans_weight_data[i + j * w_m] = weight_data[i * w_k + j];
    }
  }

  int weight_num = weight_tensor->numel();
  bool enable_int8 = op_desc.HasAttr("enable_int8");
  if (enable_int8) {
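    // Anakin shapes are 4-D, so left-pad the weight shape with 1s.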
    if (weight_shape.size() < 4UL) {
      weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1);
    }
    ::anakin::saber::Shape anakin_shape(weight_shape);
    const float int8_range = 127.;
    float in_scale = boost::get<float>(op_desc.GetAttr("input_scale"));
    float weight_scale = boost::get<float>(op_desc.GetAttr("weight_scale"));
    auto *weight1 = ::anakin::graph::GraphGlobalMem<TargetT>::Global()
                        .template new_block<::anakin::AK_INT8>(anakin_shape);
    std::vector<char> weight_int8;
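    // The weights are expected to be pre-quantized by calibration; check
    // that every value fits in a signed 8-bit range before narrowing.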
    for (int i = 0; i < weight_num; i++) {
      bool is_valid_int8 =
          ((trans_weight_data[i] >= -128) && (trans_weight_data[i] <= 127));
      PADDLE_ENFORCE(is_valid_int8,
                     "In Anakin subgraph int8 mode, the weights of fc "
                     "must lie in the range [-128, 127]");
      weight_int8.push_back(static_cast<char>(trans_weight_data[i]));
    }
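    // Stage the quantized weights in the host tensor, then mirror them
    // onto the device tensor.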
    memcpy(static_cast<void *>(weight1->h_tensor().mutable_data()),
           static_cast<void *>(weight_int8.data()), sizeof(char) * weight_num);
    weight1->d_tensor().set_shape(anakin_shape);
    weight1->d_tensor().copy_from(weight1->h_tensor());
    this->engine_->AddOpAttr(op_name, "weight_1", *weight1);
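    // Mark this op as int8 and register the calibration scales,
    // normalized by the int8 range (127).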
    this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8);
    this->engine_->Graph()->SetWeightsScale(op_name,
                                            {weight_scale / int8_range}, false);
    this->engine_->AddTensorScale(input_name, in_scale / int8_range);
  } else {
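    // FP32 path: pass the transposed weights through as a float block.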
    auto *weight1 = pblock_from_vector<TargetT>(trans_weight_data);
    this->engine_->AddOpAttr(op_name, "weight_1", *weight1);
  }

  // get bias
  if (with_bias) {
    auto *b_v = scope.FindVar(op_desc.Input("Bias").front());
    PADDLE_ENFORCE_NOT_NULL(b_v);
    auto weight2 = pblock_from_var<TargetT>(*b_v);
    this->engine_->AddOpAttr(op_name, "weight_2", *weight2);
  }
}

}  // namespace anakin
}  // namespace inference
}  // namespace paddle

#ifdef PADDLE_WITH_CUDA
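// Register the mul/fc converters for the NV (CUDA) target at FP32 and
// INT8 precision.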
using mul_nv_fp32 =
    ::paddle::inference::anakin::MulOpConverter<::anakin::saber::NV,
                                                ::anakin::Precision::FP32>;
using fc_nv_fp32 =
    ::paddle::inference::anakin::FcOpConverter<::anakin::saber::NV,
                                               ::anakin::Precision::FP32>;
using mul_nv_int8 =
    ::paddle::inference::anakin::MulOpConverter<::anakin::saber::NV,
                                                ::anakin::Precision::INT8>;
using fc_nv_int8 =
    ::paddle::inference::anakin::FcOpConverter<::anakin::saber::NV,
                                               ::anakin::Precision::INT8>;

REGISTER_CUDA_ANAKIN_OP_CONVERTER(mul, mul_nv_fp32);
REGISTER_CUDA_ANAKIN_OP_CONVERTER(fc, fc_nv_fp32);
REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(mul, mul_nv_int8);
REGISTER_CUDA_INT8_ANAKIN_OP_CONVERTER(fc, fc_nv_int8);
#endif

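// Register the mul/fc converters for the X86 (CPU) target at FP32 and
// INT8 precision.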
using mul_cpu_fp32 =
    ::paddle::inference::anakin::MulOpConverter<::anakin::saber::X86,
                                                ::anakin::Precision::FP32>;
using fc_cpu_fp32 =
    ::paddle::inference::anakin::FcOpConverter<::anakin::saber::X86,
                                               ::anakin::Precision::FP32>;
using mul_cpu_int8 =
    ::paddle::inference::anakin::MulOpConverter<::anakin::saber::X86,
                                                ::anakin::Precision::INT8>;
using fc_cpu_int8 =
    ::paddle::inference::anakin::FcOpConverter<::anakin::saber::X86,
                                               ::anakin::Precision::INT8>;

REGISTER_CPU_ANAKIN_OP_CONVERTER(mul, mul_cpu_fp32);
REGISTER_CPU_ANAKIN_OP_CONVERTER(fc, fc_cpu_fp32);
REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(mul, mul_cpu_int8);
REGISTER_CPU_INT8_ANAKIN_OP_CONVERTER(fc, fc_cpu_int8);