/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <mkldnn/include/mkldnn_types.h>
#include <memory>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/variant.h"

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using framework::LoDTensor;
using framework::DDim;
using framework::ExecutionContext;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
using platform::GetMKLDNNFormat;
using mkldnn::memory;
using mkldnn::inner_product_forward;
using mkldnn::primitive;
using mkldnn::stream;
using mkldnn::prop_kind;

template <typename T_in, typename T_w, typename T_out>
class FCPrimitiveFactory {
 public:
  explicit FCPrimitiveFactory(const mkldnn::engine& engine) : engine_(engine) {}

  void ExecuteFcPrimitive(const LoDTensor* input, const Tensor* weights,
                          const Tensor* bias, LoDTensor* output,
                          const ExecutionContext& ctx) {
    RecomputeOutputDims(ctx, input, weights, output);
    // If the primitive has already been created and cached, don't create a
    // new one; just update the input and output data pointers and run it.
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
      this->Execute();
      return;
    }  // Otherwise, create a new one.

    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
    PADDLE_ENFORCE_LE(
        in_col_dims, 2,
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than 2."));
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
          input->dims().size(), 3,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
          input->format(), MKLDNNMemoryFormat::ncw,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

    // Transform weights to the default MKL-DNN format
    weights_ = TransposeWeights(weights);
    // Since MKL-DNN has a lot of limitations on what the input/weights/output
    // dimensions should be, to simplify the code, the creation of primitive
    // descriptor has been divided into separate cases, based on the number
    // of input dimensions.
    size_t input_dim_num = input->dims().size();
    boost::optional<mkldnn::inner_product_forward::primitive_desc> fc_prim_desc;
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims different than 2, 3, 4."));
        break;
    }
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update weights format inside of its memory
    weights_ = Reorder(usr_weights_desc, usr_weights_desc,
                       weights_->get_data_handle());

    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

    bias_ = CreateMemory<float>(fc_prim_desc->bias_desc(), bias);
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);

    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Create the MKL-DNN primitive, ready to be fed into the pipeline and executed
    fc_ = inner_product_forward(*fc_prim_desc);
    this->Execute();
  }

  void Execute() {
    mkldnn::stream astream(engine_);
    if (bias_) {
      fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
                             {MKLDNN_ARG_WEIGHTS, *weights_},
                             {MKLDNN_ARG_BIAS, *bias_},
                             {MKLDNN_ARG_DST, *output_}});
    } else {
      fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
                             {MKLDNN_ARG_WEIGHTS, *weights_},
                             {MKLDNN_ARG_DST, *output_}});
    }
    astream.wait();
  }

 private:
  // DNNL always returns a 2-dimensional data block as the result of an
  // inner product computation, so the 'nc' format is always set on its
  // output primitive. SetOutputFormat is therefore needed to choose an
  // appropriate format based on the number of output dimensions and the
  // format of the input tensor.
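  // Illustrative examples (derived from the cases below): a 2-D output
  // always gets nc; a 3-D output whose input arrived as nwc or nhwc gets
  // the 3-D nhwc analogue (nwc), any other 3-D input yields the nchw
  // analogue (ncw); for more dims the input format is passed through.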
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
      // In case of 3 dims, we generate a format that is based on number
      // of output dims and the layout of input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

  void UpdateDataPointers(const ExecutionContext& ctx, Tensor* out,
                          const Tensor* in) {
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // If the primitive exists but the output tensor's underlying variable
    // has changed, update its format to the one determined during the
    // first call to ExecuteFcPrimitive.
    if (out->format() == MKLDNNMemoryFormat::undef) {
      SetOutputFormat(in->format(), out);
    }
  }

  mkldnn::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
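    // Paddle stores FC weights as (input_depth, output_depth), while DNNL's
    // inner_product expects (oc, ic). Illustrative example: Paddle weight
    // dims {512, 128} become DNNL dims {128, 512}.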
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

  mkldnn::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto input_dims = framework::vectorize(input->dims());
    std::vector<int64_t> new_input_dims = {input_dims[0] * input_dims[1],
                                           input_dims[2], 1};
    auto src_desc = CreateMemDescriptor<T_in>(new_input_dims, input->format());

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
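    // As in the 2-D case, the two weight dims are swapped, and a trailing
    // spatial dim of 1 is appended to match the oiw layout. Illustrative
    // example: Paddle weight dims {512, 128} become {128, 512, 1}.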
    auto paddle_w_dims = framework::vectorize(weights->dims());
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

  mkldnn::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
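    // The weights are reshaped to (oc, ic, h, w), taking the spatial dims
    // from the input. Illustrative example: input dims {N, 3, 8, 8} and
    // Paddle weight dims {192, 64} yield DNNL weight dims {64, 3, 8, 8}.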
    auto old_w_dims = framework::vectorize(weights->dims());
    auto old_in_dims = framework::vectorize(input->dims());
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
  }

  // Convert data from one data format to another
  mkldnn::memory Reorder(const memory::desc& src_desc,
                         const memory::desc& dst_desc, void* src_data) {
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = memory(dst_desc, engine_);

    auto reorder = mkldnn::reorder(src_mem, dst_mem);
    mkldnn::stream astream(engine_);
    reorder.execute(astream, src_mem, dst_mem);
    astream.wait();

    return dst_mem;
  }

  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
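  // Illustrative example: an fp32 weight of 0.5f reordered to s8 with an
  // output scale of 127 is stored as round(0.5f * 127) == 64 (values are
  // saturated to the s8 range if needed).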
  mkldnn::memory Reorder(const memory& src_mem, const memory::desc& dst_md,
                         const std::vector<float>& scale_data) {
    mkldnn::memory dst_mem = mkldnn::memory(dst_md, engine_);
    mkldnn::primitive_attr attributes;
    // According to MKL-DNN's documentation, the mask determines along which
    // dimensions the scales should be applied:
    // 0 - a single scale is applied to the whole tensor
    // 1 - a separate scale is applied to each slice along the dimension
    //     whose bit is set. In case of weights quantization that dimension
    //     is the output channel, because we perform per-output-channel
    //     quantization
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
    auto reorder = mkldnn::reorder(src_mem, dst_mem, attributes);

    mkldnn::stream astream(engine_);
    reorder.execute(astream,
                    {{MKLDNN_ARG_FROM, src_mem}, {MKLDNN_ARG_TO, dst_mem}});
    astream.wait();

    return dst_mem;
  }

  template <typename T>
  static mkldnn::memory::desc CreateMemDescriptor(
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
    return platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(),
                                   format);
  }

  template <typename T>
  static mkldnn::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                  MKLDNNMemoryFormat format) {
    auto dims = framework::vectorize(tensor->dims());
    return CreateMemDescriptor<T>(dims, format);
  }

  template <typename T>
  mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc,
                              const Tensor* tensor) {
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
  }

  mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc, void* data) {
    return memory(desc, engine_, data);
  }

  // Transpose weights through MKL-DNN's reorder from io to oi format.
  mkldnn::memory TransposeWeights(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // Swap dims to the (oc, ic) order DNNL expects
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
    return Reorder(src_desc, dst_desc,
                   platform::to_void_cast<float>(weights->data<float>()));
  }

  // Compute the bias scales so that their values correspond to the scale
  // of the data produced by the input and weights multiplication
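  // Illustrative example: with Scale_in = 0.5 and Scale_weights = {100, 50},
  // the bias scales are {0.5 * 100, 0.5 * 50} = {50, 25}.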
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }

  // Correct the output scale to take the scaling of input and weights into
  // account. Since the data that comes out of the input and weight
  // multiplication is scaled with their combined scales, it needs to be
  // divided by those scales to normalise it back to its floating-point
  // range, and then multiplied by the scale desired on the output.
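  // Illustrative example: with Scale_in = 0.5, Scale_weights = {100} and
  // Scale_out = 2, the shift scale is 2 / (0.5 * 100) = 0.04.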
  std::vector<float> ComputeOutputShiftScale(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    // If the output will be in floats, we don't multiply by scale_out.
    auto scale_out_data = ctx.Attr<bool>("force_fp32_output")
                              ? 1.0f
                              : ctx.Attr<float>("Scale_out");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        output_shift_scale[i] = scale_out_data;
      else
        output_shift_scale[i] =
            scale_out_data / (scale_in_data * scale_weights_data[i]);
    }

    return output_shift_scale;
  }

  // Compute MKL-DNN's scaling mask, which determines along which dimension
  // slices the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
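  // Illustrative example: CreateMask(1, true) == (1 << 1) == 2, i.e. one
  // scale per index along dimension 1; CreateMask(x, false) == 0 for any x,
  // i.e. a single scale for the whole tensor.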
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }

  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
    weights_ =
        Reorder(*weights_, dst, ctx.Attr<std::vector<float>>("Scale_weights"));
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
    bias_ = Reorder(*bias_, fc_prim_desc.bias_desc(), bias_scales);
  }

  // Fuse relu into FC when the activation_type attribute is set to 'relu'
  mkldnn::primitive_attr CreatePostOps(const ExecutionContext& ctx) {
    mkldnn::primitive_attr attributes;
    mkldnn::post_ops post_operations;

    auto output_shift_scale = ComputeOutputShiftScale(ctx);
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float scale = 1.0f;
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     negative_slope, placeholder);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  mkldnn::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const mkldnn::memory::desc& input_desc,
      const mkldnn::memory::desc& weights_desc,
      const mkldnn::memory::desc& bias_desc,
      const mkldnn::memory::desc& dst_desc,
      const mkldnn::primitive_attr& attrs) {
    auto fc_desc =
        inner_product_forward::desc(prop_kind::forward_scoring, input_desc,
                                    weights_desc, bias_desc, dst_desc);

    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
  }

  // Create the output memory based on the output tensor and the format
  // chosen for the output by the inner_product primitive descriptor
  mkldnn::memory CreateDstMemory(
      const mkldnn::inner_product_forward::primitive_desc& fc_prim_desc,
      const ExecutionContext& ctx, Tensor* output) {
    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);

    return dst_mem;
  }

  void RecomputeOutputDims(const ExecutionContext& ctx, const LoDTensor* input,
                           const Tensor* w, LoDTensor* output) {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights, false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(input->dims(), w->dims(), output_dims, in_num_col_dims,
                 padding_weights);
    output->Resize(framework::make_ddim(output_dims));
    output->set_lod(input->lod());
  }

 private:
  const mkldnn::engine& engine_;
  boost::optional<memory> bias_;
  boost::optional<memory> input_;
  boost::optional<memory> output_;
  boost::optional<memory> weights_;
  boost::optional<inner_product_forward> fc_;
};

// Attempt to fetch cached primitive factory based on provided parameters
// of input format, weight dimensions and output name.
// If not cached, create a new one.
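// Note that the key also includes the thread id (see platform::CreateKey
// below), so every thread caches and reuses its own factory instance.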
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
                    const ExecutionContext& ctx, const Tensor* input,
                    const Tensor* weights,
                    const mkldnn::engine& mkldnn_engine) {
  const std::string key = platform::CreateKey(
      platform::ThreadIDasStr(), input->format(), input->dims()[0],
      framework::vectorize<int>(weights->dims()), ctx.OutputName("Out"));

  auto prim_creator =
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
  if (prim_creator == nullptr) {
    prim_creator =
        std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(mkldnn_engine);
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

// Choose appropriate primitive factory implementation based on inferred
// output type (uint8, int8 or float).
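// When relu is fused the activation output is guaranteed to be non-negative,
// so the uint8 kernel is picked; otherwise a signed int8 output is used.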
template <typename T_in, typename T_w>
static void ExecuteFc(const MKLDNNDeviceContext& dev_ctx,
                      const ExecutionContext& ctx, const LoDTensor* input,
                      const Tensor* w, const Tensor* bias, LoDTensor* output,
                      const mkldnn::engine& mkldnn_engine, bool fuse_relu,
                      bool force_fp32_output) {
  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
  if (!is_int8 || force_fp32_output) {
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, ctx, input, w, mkldnn_engine)
        ->ExecuteFcPrimitive(input, w, bias, output, ctx);
  } else if (fuse_relu) {
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, ctx, input, w,
                                            mkldnn_engine)
        ->ExecuteFcPrimitive(input, w, bias, output, ctx);
  } else {
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, ctx, input, w,
                                           mkldnn_engine)
        ->ExecuteFcPrimitive(input, w, bias, output, ctx);
  }
}

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
    auto bias = ctx.Input<Tensor>("Bias");
    auto output = ctx.Output<LoDTensor>("Out");

    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    ExecuteFc<T_in, T_w>(dev_ctx, ctx, input, w, bias, output, mkldnn_engine,
                         fuse_relu, force_fp32_output);

    output->set_layout(DataLayout::kMKLDNN);
  }
};
}  // namespace operators
}  // namespace paddle

// FC weights are stored as fp32 by default; the weight-data-type template
// argument specifies their destination data type, i.e. the type eventually
// used during the kernel's computations.
namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    FP32, ops::kFCMKLDNNFP32,
                                    ops::FCMKLDNNOpKernel<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    U8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    S8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);