/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"

namespace paddle {
namespace framework {
class LoDTensor;
class Tensor;
}  // namespace framework
namespace platform {
class MKLDNNDeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using framework::LoDTensor;
using framework::DDim;
using framework::ExecutionContext;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
using platform::GetMKLDNNFormat;
using dnnl::memory;
using dnnl::inner_product_forward;
using dnnl::primitive;
using dnnl::stream;
using dnnl::prop_kind;

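// FCPrimitiveFactory creates, caches and executes the oneDNN (MKL-DNN)
// inner_product primitive that implements the FC operator. T_in, T_w and
// T_out are the input, destination weight and output data types; judging by
// the kernel registrations at the bottom of this file, typical combinations
// are <float, float, float> for fp32 execution and <uint8_t or int8_t,
// int8_t, ...> for int8 execution.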
template <typename T_in, typename T_w, typename T_out>
class FCPrimitiveFactory {
 public:
  explicit FCPrimitiveFactory(const dnnl::engine& engine) : engine_(engine) {}

  void ExecuteFcPrimitive(const LoDTensor* input, const Tensor* weights,
                          const Tensor* bias, LoDTensor* output,
                          const MKLDNNDeviceContext& dev_ctx,
                          const ExecutionContext& ctx) {
    RecomputeOutputDims(ctx, input, weights, output);
    // If primitive has already been created and cached, don't create new one,
    // but update input and output data pointers and return it.
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
      this->Execute();
      return;
    }  // Otherwise, create a new one.

    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
    PADDLE_ENFORCE_LE(
        in_col_dims, 2,
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than "
            "2."));
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
          input->dims().size(), 3,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
          input->format(), MKLDNNMemoryFormat::ncw,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

    weights_ = CreateWeightsMemory(weights);

    // Since MKL-DNN has a lot of limitations on what the input/weights/output
    // dimensions should be, to simplify the code, the creation of primitive
    // descriptor has been divided into separate cases, based on the number
    // of input dimensions.
    size_t input_dim_num = input->dims().size();
    paddle::optional<dnnl::inner_product_forward::primitive_desc> fc_prim_desc;
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims different than 2, 3, 4."));
        break;
    }
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update weights format inside of its memory
    weights_ = Reorder(usr_weights_desc, usr_weights_desc,
                       weights_->get_data_handle());

    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

    bias_ = CreateMemoryToBeCached<float>(fc_prim_desc->bias_desc(), bias);
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);

    // Store weights and bias in the mkldnn cache
    CacheWeightsAndBias(dev_ctx, ctx);

    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Create the MKL-DNN primitive, ready to be fed into the pipeline and
    // executed
    fc_ = inner_product_forward(*fc_prim_desc);
    this->Execute();
  }

  void Execute() {
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    if (bias_) {
      fc_->execute(astream, {{DNNL_ARG_SRC, *input_},
                             {DNNL_ARG_WEIGHTS, *weights_},
                             {DNNL_ARG_BIAS, *bias_},
                             {DNNL_ARG_DST, *output_}});
    } else {
      fc_->execute(astream, {{DNNL_ARG_SRC, *input_},
                             {DNNL_ARG_WEIGHTS, *weights_},
                             {DNNL_ARG_DST, *output_}});
    }
    astream.wait();
  }

 private:
  // DNNL always returns 2-dimensional data block as a result of computing
  // inner product. Hence the format 'nc' is always set for its output
  // primitive. Therefore, function SetOutputFormat is needed to choose
  // an appropriate format based on the number of input dimensions and
  // format of an input tensor.
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
      // In case of 3 dims, we generate a format that is based on number
      // of output dims and the layout of input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

  void UpdateDataPointers(const ExecutionContext& ctx, Tensor* out,
                          const Tensor* in) {
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // If the primitive exists, but the output tensor's underlying variable
    // has changed, update its format to the one determined during the first
    // call to the ExecuteFcPrimitive method.
    if (out->format() == MKLDNNMemoryFormat::undef) {
      SetOutputFormat(in->format(), out);
    }
  }

  dnnl::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

  dnnl::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto input_dims = framework::vectorize(input->dims());
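    // Fold the 3D input of shape [d0, d1, d2] into [d0 * d1, d2, 1] so that
    // inner_product treats every row as a separate batch element.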
    std::vector<int64_t> new_input_dims = {input_dims[0] * input_dims[1],
                                           input_dims[2], 1};
    auto src_desc = CreateMemDescriptor<T_in>(new_input_dims, input->format());

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

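  // Paddle stores FC weights as (in, out); oneDNN's 3D inner_product expects
  // (out, in, width), hence the swap below and the trailing 1.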
  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
    auto paddle_w_dims = framework::vectorize(weights->dims());
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

  dnnl::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
    auto old_w_dims = framework::vectorize(weights->dims());
    auto old_in_dims = framework::vectorize(input->dims());
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
  }

  // Convert data from one data format to another
  std::shared_ptr<dnnl::memory> Reorder(const memory::desc& src_desc,
                                        const memory::desc& dst_desc,
                                        void* src_data) {
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = std::make_shared<memory>(dst_desc, engine_);

    auto reorder = dnnl::reorder(src_mem, *dst_mem);
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream, src_mem, *dst_mem);
      astream.wait();
    }

    return dst_mem;
  }

  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
  std::shared_ptr<dnnl::memory> ReorderWithScale(
      const std::shared_ptr<memory> src_mem, const memory::desc& dst_md,
      const std::vector<float>& scale_data) {
    auto dst_mem = std::make_shared<dnnl::memory>(dst_md, engine_);
    dnnl::primitive_attr attributes;
    // According to MKL-DNN's documentation, the mask determines along which
    // dimensions the scales should be applied:
    // 0 - a single scale is applied to the whole tensor
    // 1 - scales are applied along the dimension whose mask bit is set
    //     (here dimension 0). In case of weights quantization, that is the
    //     output channel dimension, because we perform per-output-channel
    //     quantization.
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
    auto reorder = dnnl::reorder(*src_mem, *dst_mem, attributes);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream,
                      {{DNNL_ARG_FROM, *src_mem}, {DNNL_ARG_TO, *dst_mem}});
      astream.wait();
    }

    return dst_mem;
  }

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
    return platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(),
                                   format);
  }

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                MKLDNNMemoryFormat format) {
    auto dims = framework::vectorize(tensor->dims());
    return CreateMemDescriptor<T>(dims, format);
  }

  template <typename T>
  dnnl::memory CreateMemory(const dnnl::memory::desc& desc,
                            const Tensor* tensor) {
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
  }

  dnnl::memory CreateMemory(const dnnl::memory::desc& desc, void* data) {
    return memory(desc, engine_, data);
  }

  template <typename T>
  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, const Tensor* tensor) {
    return CreateMemoryToBeCached(desc,
                                  platform::to_void_cast<T>(tensor->data<T>()));
  }

  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, void* data) {
    return std::make_shared<memory>(desc, engine_, data);
  }

  // Create weights memory and transform to default MKL-DNN format
  std::shared_ptr<dnnl::memory> CreateWeightsMemory(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap to oneDNN's (out, in) order
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
    // Transpose weights through MKL-DNN's reorder from io to oi format.
    return Reorder(src_desc, dst_desc,
                   platform::to_void_cast<float>(weights->data<float>()));
  }

  void CacheWeightsAndBias(const MKLDNNDeviceContext& dev_ctx,
                           const ExecutionContext& ctx) {
    std::string key = platform::CreateKey(dev_ctx);
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

    const std::string weights_key = key + ctx.InputName("W");
    const std::string bias_key = key + ctx.InputName("Bias");
    dev_ctx.SetBlob(weights_key, weights_);
    dev_ctx.SetBlob(bias_key, bias_);
  }

  // Compute the bias scales so that its values correspond to the scale
  // of the data produced by the input-weights multiplication
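  // For instance (hypothetical values), Scale_in = 2.0f and
  // Scale_weights = {0.5f, 4.0f} yield bias_scales = {1.0f, 8.0f}.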
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }

  // Correct the output scale to take into account the scaling of input and
  // weights. Since the data produced by the input-weights multiplication
  // carries their combined scales, it needs to be divided by those scales to
  // normalise it back to its floating-point range, and then multiplied by
  // the scale desired on the output.
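  // For instance (hypothetical values, with force_fp32_output == false),
  // Scale_in = 2.0f, Scale_weights = {4.0f} and Scale_out = 8.0f yield
  // output_shift_scale = {8.0f / (2.0f * 4.0f)} = {1.0f}.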
  std::vector<float> ComputeOutputShiftScale(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    // If the output will be in floats, we don't multiply by scale_out.
    auto scale_out_data = ctx.Attr<bool>("force_fp32_output")
                              ? 1.0f
                              : ctx.Attr<float>("Scale_out");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        output_shift_scale[i] = scale_out_data;
      else
        output_shift_scale[i] =
            scale_out_data / (scale_in_data * scale_weights_data[i]);
    }

    return output_shift_scale;
  }

  // Compute MKL-DNN's scaling mask, which determines along which dimension
  // slice the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
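  // For example, CreateMask(0, true) == 1 (scales vary along dimension 0),
  // CreateMask(1, true) == 2 (scales vary along dimension 1) and
  // CreateMask(d, false) == 0 (a single scale for the whole tensor).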
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }

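  // Quantize the fp32 weights into the destination weight type (e.g. int8)
  // and reorder them into the layout chosen by the FC primitive descriptor,
  // using the scales from the Scale_weights attribute.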
  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
    weights_ = ReorderWithScale(weights_, dst,
                                ctx.Attr<std::vector<float>>("Scale_weights"));
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
    bias_ = ReorderWithScale(bias_, fc_prim_desc.bias_desc(), bias_scales);
  }

  // Fuse the activation into FC when the activation_type attribute is set
  // (e.g. to 'relu')
  dnnl::primitive_attr CreatePostOps(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;

    auto output_shift_scale = ComputeOutputShiftScale(ctx);
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float scale = 1.0f;
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_relu,
                                     negative_slope, placeholder);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_tanh") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu_tanh,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_erf") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_gelu_erf,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "tanh") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_tanh,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "sigmoid") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_logistic,
                                     alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "hard_swish") {
      constexpr float scale = 1.0f;
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(scale, dnnl::algorithm::eltwise_hardswish,
                                     alpha, beta);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  dnnl::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const dnnl::memory::desc& input_desc,
      const dnnl::memory::desc& weights_desc,
      const dnnl::memory::desc& bias_desc, const dnnl::memory::desc& dst_desc,
      const dnnl::primitive_attr& attrs) {
    auto fc_desc =
        inner_product_forward::desc(prop_kind::forward_scoring, input_desc,
                                    weights_desc, bias_desc, dst_desc);

    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
  }

  // Create output memory based on output tensor and inner_product
  // primitive descriptor format chosen for output
  dnnl::memory CreateDstMemory(
      const dnnl::inner_product_forward::primitive_desc& fc_prim_desc,
      const ExecutionContext& ctx, Tensor* output) {
    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);

    return dst_mem;
  }

  void RecomputeOutputDims(const ExecutionContext& ctx, const LoDTensor* input,
                           const Tensor* w, LoDTensor* output) {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights, false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(input->dims(), w->dims(), output_dims, in_num_col_dims,
                 padding_weights);
    output->Resize(framework::make_ddim(output_dims));
    output->set_lod(input->lod());
  }

 private:
  const dnnl::engine& engine_;
  paddle::optional<memory> input_;
  paddle::optional<memory> output_;
  std::shared_ptr<memory> bias_;
  std::shared_ptr<memory> weights_;
  paddle::optional<inner_product_forward> fc_;
};

// Attempt to fetch cached primitive factory based on provided parameters
// of input format, weight dimensions and output name.
// If not cached, create a new one.
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
                    const std::string& key) {
  auto prim_creator =
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
  if (prim_creator == nullptr) {
    prim_creator = std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(
        dev_ctx.GetEngine());
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

// Choose the appropriate primitive factory implementation based on the
// inferred output type (uint8, int8, bfloat16 or float).
template <typename T_in, typename T_w>
static void ExecuteFc(const ExecutionContext& ctx, const LoDTensor* input,
                      const Tensor* w, const Tensor* bias, LoDTensor* output,
                      bool fuse_relu, bool force_fp32_output) {
  auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
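  // The cache key encodes the input format, batch size, weight dims and
  // output name (extended with thread info when needed), so each distinct
  // FC instance gets its own cached primitive factory.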
  std::string prim_key = platform::CreateKey(
      dev_ctx, input->format(), input->dims()[0],
      framework::vectorize<int>(w->dims()), ctx.OutputName("Out"));
  prim_key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, prim_key);

  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
  bool is_bfloat16 = std::is_same<T_in, paddle::platform::bfloat16>::value;
  if ((!is_int8 && !is_bfloat16) || force_fp32_output) {
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (is_bfloat16) {
    GetPrimitiveFactory<T_in, T_w, platform::bfloat16>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (fuse_relu) {
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else {
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  }
}

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
    platform::MKLDNNDeviceContext::tls().log_lib_version();
    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
    auto bias = ctx.Input<Tensor>("Bias");
    auto output = ctx.Output<LoDTensor>("Out");

    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    ExecuteFc<T_in, T_w>(ctx, input, w, bias, output, fuse_relu,
                         force_fp32_output);

    output->set_layout(DataLayout::kMKLDNN);
  }
};
}  // namespace operators
}  // namespace paddle

// FC weights are by default stored as fp32; the weight-type template
// argument denotes their destination data type, i.e. what is eventually
// going to be used during the kernel's computations.
namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    FP32, ops::kFCMKLDNNFP32,
                                    ops::FCMKLDNNOpKernel<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
    fc, MKLDNN, ::paddle::platform::CPUPlace, BF16, ops::kFCMKLDNNFP32,
    ops::FCMKLDNNOpKernel<paddle::platform::bfloat16,
                          paddle::platform::bfloat16>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    U8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    S8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);