/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"

namespace paddle {
namespace framework {
class Tensor;
}  // namespace framework
namespace platform {
class MKLDNNDeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using framework::LoDTensor;
using framework::DDim;
using framework::ExecutionContext;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
using platform::GetMKLDNNFormat;
using dnnl::memory;
using dnnl::inner_product_forward;
using dnnl::primitive;
using dnnl::stream;
using dnnl::prop_kind;

template <typename T_in, typename T_w, typename T_out>
class FCPrimitiveFactory {
 public:
  explicit FCPrimitiveFactory(const dnnl::engine& engine) : engine_(engine) {}

  void ExecuteFcPrimitive(const LoDTensor* input, const Tensor* weights,
                          const Tensor* bias, LoDTensor* output,
                          const MKLDNNDeviceContext& dev_ctx,
                          const ExecutionContext& ctx) {
    RecomputeOutputDims(ctx, input, weights, output);
    // If the primitive has already been created and cached, don't create a
    // new one, but update the input and output data pointers and execute it.
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
      this->Execute();
      return;
    }  // Otherwise, create a new one.

    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
    PADDLE_ENFORCE_LE(
        in_col_dims, 2,
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than 2."));
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
          input->dims().size(), 3,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
          input->format(), MKLDNNMemoryFormat::ncw,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

    weights_ = CreateWeightsMemory(weights);

    // Since MKL-DNN has many limitations on what the input/weights/output
    // dimensions should be, the creation of the primitive descriptor is
    // split into separate cases, based on the number of input dimensions,
    // to keep the code simple.
    size_t input_dim_num = input->dims().size();
    paddle::optional<dnnl::inner_product_forward::primitive_desc> fc_prim_desc;
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims different than 2, 3, 4."));
        break;
    }
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update the weights format inside its memory object
    weights_ = Reorder(usr_weights_desc, usr_weights_desc,
                       weights_->get_data_handle());

    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

    bias_ = CreateMemoryToBeCached<float>(fc_prim_desc->bias_desc(), bias);
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);

    // Store weights and bias in the mkldnn cache
    CacheWeightsAndBias(dev_ctx, ctx);

    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Return MKL-DNN primitive ready to be fed into pipeline and executed
    fc_ = inner_product_forward(*fc_prim_desc);
    this->Execute();
  }

  void Execute() {
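    // Bind the current src/weights/bias/dst memory objects to the cached
    // inner_product primitive and run it on the thread-local oneDNN stream.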
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    if (bias_) {
      fc_->execute(astream, {{DNNL_ARG_SRC, *input_},
                             {DNNL_ARG_WEIGHTS, *weights_},
                             {DNNL_ARG_BIAS, *bias_},
                             {DNNL_ARG_DST, *output_}});
    } else {
      fc_->execute(astream, {{DNNL_ARG_SRC, *input_},
                             {DNNL_ARG_WEIGHTS, *weights_},
                             {DNNL_ARG_DST, *output_}});
    }
    astream.wait();
  }

 private:
  // DNNL always returns a 2-dimensional data block as the result of
  // computing an inner product. Hence the 'nc' format is always set for
  // its output primitive, and the SetOutputFormat function is needed to
  // choose an appropriate format based on the number of input dimensions
  // and the format of the input tensor.
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
      // In case of 3 dims, we generate a format based on the number of
      // output dims and the layout of the input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

  void UpdateDataPointers(const ExecutionContext& ctx, Tensor* out,
                          const Tensor* in) {
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // If the primitive exists but the output tensor's underlying variable
    // has changed, update its format to the one determined during the
    // first call to the ExecuteFcPrimitive method.
    if (out->format() == MKLDNNMemoryFormat::undef) {
      SetOutputFormat(in->format(), out);
    }
  }

  dnnl::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

  dnnl::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto input_dims = framework::vectorize(input->dims());
    std::vector<int64_t> new_input_dims = {input_dims[0] * input_dims[1],
                                           input_dims[2], 1};
    auto src_desc = CreateMemDescriptor<T_in>(new_input_dims, input->format());

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
    auto paddle_w_dims = framework::vectorize(weights->dims());
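    // Paddle stores FC weights as (input_dim, output_dim); the 3D
    // inner_product path expects (output, input, 1) in oiw layout, so the
    // first two dims are swapped and a trailing spatial dim of 1 is added.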
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

  dnnl::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
    auto old_w_dims = framework::vectorize(weights->dims());
    auto old_in_dims = framework::vectorize(input->dims());
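    // Weights are laid out as oihw here: the output-channel count comes
    // from the weights' second dim, while the input-channel and spatial
    // dims are taken from the input tensor's shape.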
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
  }

  // Convert data from one data format to another
  std::shared_ptr<dnnl::memory> Reorder(const memory::desc& src_desc,
                                        const memory::desc& dst_desc,
                                        void* src_data) {
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = std::make_shared<memory>(dst_desc, engine_);

    auto reorder = dnnl::reorder(src_mem, *dst_mem);
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream, src_mem, *dst_mem);
      astream.wait();
    }

    return dst_mem;
  }

  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
  std::shared_ptr<dnnl::memory> ReorderWithScale(
      const std::shared_ptr<memory> src_mem, const memory::desc& dst_md,
      const std::vector<float>& scale_data) {
    auto dst_mem = std::make_shared<dnnl::memory>(dst_md, engine_);
    dnnl::primitive_attr attributes;
    // According to MKL-DNN's documentation, the mask determines along which
    // dimensions the scale should be applied:
    // 0 - a single scale is applied to the whole tensor,
    // 1 - the scale is applied along the dimension whose mask bit is set.
    //     In case of weights quantization, that dimension is the output
    //     channel, because we perform per-output-channel quantization.
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
    auto reorder = dnnl::reorder(*src_mem, *dst_mem, attributes);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream,
                      {{DNNL_ARG_FROM, *src_mem}, {DNNL_ARG_TO, *dst_mem}});
      astream.wait();
    }

    return dst_mem;
  }

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
    return platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(),
                                   format);
  }

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                MKLDNNMemoryFormat format) {
    auto dims = framework::vectorize(tensor->dims());
    return CreateMemDescriptor<T>(dims, format);
  }

  template <typename T>
  dnnl::memory CreateMemory(const dnnl::memory::desc& desc,
                            const Tensor* tensor) {
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
  }

  dnnl::memory CreateMemory(const dnnl::memory::desc& desc, void* data) {
    return memory(desc, engine_, data);
  }

  template <typename T>
  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, const Tensor* tensor) {
    return CreateMemoryToBeCached(desc,
                                  platform::to_void_cast<T>(tensor->data<T>()));
  }

  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, void* data) {
    return std::make_shared<memory>(desc, engine_, data);
  }

  // Create weights memory and transform to default MKL-DNN format
  std::shared_ptr<dnnl::memory> CreateWeightsMemory(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // Correct output dimensions
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
    // Transpose weights through MKL-DNN's reorder from io to oi format.
    return Reorder(src_desc, dst_desc,
                   platform::to_void_cast<float>(weights->data<float>()));
  }

  void CacheWeightsAndBias(const MKLDNNDeviceContext& dev_ctx,
                           const ExecutionContext& ctx) {
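    // Store the reordered weights and (possibly quantized) bias in the
    // device context's blob cache, keyed per op instance (and per thread
    // when needed), so that subsequent executions can reuse them.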
    std::string key = platform::CreateKey(dev_ctx);
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

    const std::string weights_key = key + ctx.InputName("W");
    const std::string bias_key = key + ctx.InputName("Bias");
    dev_ctx.SetBlob(weights_key, weights_);
    dev_ctx.SetBlob(bias_key, bias_);
  }

  // Compute the bias scales so that their values correspond to the
  // scale of the data produced by the weights and input multiplication.
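  // For int8 execution the fp32 bias has to match the int32 accumulator,
  // which is scaled by scale_in * scale_weights[i]; hence
  // bias_scale[i] = scale_in * scale_weights[i].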
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }

  // Correct the output scale to take into account the scaling of input and
  // weights. Since the data that comes out of the input and weight
  // multiplication is scaled with their own scales, it needs to be divided
  // by those scales to normalise it back to its floating-point range, and
  // then multiplied by the desired output scale.
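  // In effect: output_shift_scale[i] = scale_out / (scale_in * scale_w[i]),
  // except that, when an activation is fused, scale_out is applied to the
  // activation post-op instead (returned here as activation_scale).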
  std::tuple<std::vector<float>, float> ComputeOutputShiftScale(
      const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");

    // If the output will be in floats, we don't multiply by scale_out.
    float activation_scale = 1.0f;
    float inner_scale = 1.0f;
    if (!ctx.Attr<bool>("force_fp32_output")) {
      // If an activation is fused, use its scale; otherwise the inner scale.
      if (!ctx.Attr<std::string>("activation_type").empty()) {
        activation_scale = ctx.Attr<float>("Scale_out");
      } else {
        inner_scale = ctx.Attr<float>("Scale_out");
      }
    }

    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        output_shift_scale[i] = inner_scale;
      else
        output_shift_scale[i] =
            inner_scale / (scale_in_data * scale_weights_data[i]);
    }

    return make_tuple(output_shift_scale, activation_scale);
  }

  // Compute MKL-DNN's scaling mask, which determines along which dimension
  // slice the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }

  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
    weights_ = ReorderWithScale(weights_, dst,
                                ctx.Attr<std::vector<float>>("Scale_weights"));
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
    bias_ = ReorderWithScale(bias_, fc_prim_desc.bias_desc(), bias_scales);
  }

  // Create primitive attributes: set the output scales and fuse the
  // activation requested via the activation_type attribute (e.g. 'relu')
  // as an eltwise post-op.
  dnnl::primitive_attr CreatePostOps(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;
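    // Note: in oneDNN's post_ops::append_eltwise(scale, alg, alpha, beta),
    // alpha and beta carry the algorithm-specific parameters (e.g. relu's
    // negative slope), while scale rescales the activation's result.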

    std::vector<float> output_shift_scale;
    float scale;
    std::tie(output_shift_scale, scale) = ComputeOutputShiftScale(ctx);
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_relu,
                                     negative_slope, placeholder);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu_tanh,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_erf") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu_erf,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_tanh,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "sigmoid") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_logistic,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "hard_swish") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_hardswish,
                                     alpha, beta);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  dnnl::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const dnnl::memory::desc& input_desc,
      const dnnl::memory::desc& weights_desc,
      const dnnl::memory::desc& bias_desc, const dnnl::memory::desc& dst_desc,
      const dnnl::primitive_attr& attrs) {
    auto fc_desc =
        inner_product_forward::desc(prop_kind::forward_scoring, input_desc,
                                    weights_desc, bias_desc, dst_desc);

    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
  }

  // Create output memory based on output tensor and inner_product
  // primitive descriptor format chosen for output
  dnnl::memory CreateDstMemory(
      const dnnl::inner_product_forward::primitive_desc& fc_prim_desc,
      const ExecutionContext& ctx, Tensor* output) {
    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);

    return dst_mem;
  }

  void RecomputeOutputDims(const ExecutionContext& ctx, const LoDTensor* input,
                           const Tensor* w, LoDTensor* output) {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights, false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(input->dims(), w->dims(), output_dims, in_num_col_dims,
                 padding_weights);
    output->Resize(framework::make_ddim(output_dims));
    output->set_lod(input->lod());
  }

 private:
  const dnnl::engine& engine_;
  paddle::optional<memory> input_;
  paddle::optional<memory> output_;
  std::shared_ptr<memory> bias_;
  std::shared_ptr<memory> weights_;
  paddle::optional<inner_product_forward> fc_;
};

// Attempt to fetch a cached primitive factory based on the provided
// parameters: input format, weight dimensions and output name.
// If not cached, create a new one.
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
                    const std::string& key) {
  auto prim_creator =
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
  if (prim_creator == nullptr) {
    prim_creator = std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(
        dev_ctx.GetEngine());
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

// Choose the appropriate primitive factory implementation based on the
// inferred output type (uint8, int8, bfloat16 or float).
template <typename T_in, typename T_w>
static void ExecuteFc(const ExecutionContext& ctx, const LoDTensor* input,
                      const Tensor* w, const Tensor* bias, LoDTensor* output,
                      bool fuse_relu, bool force_fp32_output) {
  auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
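  // The cache key encodes the input format, batch size, weight dimensions
  // and output name, so each distinct FC instance (and input shape) gets
  // its own primitive factory.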
  std::string prim_key = platform::CreateKey(
      dev_ctx, input->format(), input->dims()[0],
      framework::vectorize<int>(w->dims()), ctx.OutputName("Out"));
  prim_key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, prim_key);

  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
  bool is_bfloat16 = std::is_same<T_in, paddle::platform::bfloat16>::value;
  if ((!is_int8 && !is_bfloat16) || force_fp32_output) {
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (is_bfloat16) {
    GetPrimitiveFactory<T_in, T_w, platform::bfloat16>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (fuse_relu) {
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else {
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  }
}

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
    platform::MKLDNNDeviceContext::tls().log_lib_version();
    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
    auto bias = ctx.Input<Tensor>("Bias");
    auto output = ctx.Output<LoDTensor>("Out");

    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    ExecuteFc<T_in, T_w>(ctx, input, w, bias, output, fuse_relu,
                         force_fp32_output);

    output->set_layout(DataLayout::kMKLDNN);
  }
};
}  // namespace operators
}  // namespace paddle

// Weights of FC are stored as fp32 by default; the weight-data-type template
// argument indicates their destination data type, i.e. what will eventually
// be used during the kernel's computations.
namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    FP32, ops::kFCMKLDNNFP32,
                                    ops::FCMKLDNNOpKernel<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
    fc, MKLDNN, ::paddle::platform::CPUPlace, BF16, ops::kFCMKLDNNFP32,
    ops::FCMKLDNNOpKernel<paddle::platform::bfloat16,
                          paddle::platform::bfloat16>);

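// The U8 and S8 kernels below differ only in their input data type; the
// output data type is chosen at run time in ExecuteFc (uint8 when relu is
// fused, int8 otherwise, float when force_fp32_output is set).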
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    U8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    S8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);