/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"

namespace phi {
class DenseTensor;
}  // namespace phi

namespace paddle {
namespace operators {

using dnnl::inner_product_forward;
using dnnl::memory;
using dnnl::primitive;
using dnnl::prop_kind;
using dnnl::stream;
using framework::DataLayout;
using framework::DDim;
using framework::ExecutionContext;
using framework::LoDTensor;
using framework::Tensor;
using platform::GetMKLDNNFormat;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;

template <typename T_in, typename T_w, typename T_out>
class FCPrimitiveFactory {
 public:
  explicit FCPrimitiveFactory(const dnnl::engine& engine) : engine_(engine) {}

  void ExecuteFcPrimitive(const LoDTensor* input,
                          const Tensor* weights,
                          const Tensor* bias,
                          LoDTensor* output,
                          const MKLDNNDeviceContext& dev_ctx,
                          const ExecutionContext& ctx) {
    RecomputeOutputDims(ctx, input, weights, output);
    // If the primitive has already been created and cached, don't create a
    // new one, but update the input and output data pointers and execute it.
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
      this->Execute();
      return;
    }  // Otherwise, create a new one.

    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
    PADDLE_ENFORCE_LE(
        in_col_dims,
        2,
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than 2."));
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
          input->dims().size(),
          3,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
          input->format(),
          MKLDNNMemoryFormat::ncw,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

    weights_ = CreateWeightsMemory(weights);

    // Since MKL-DNN has a lot of limitations on what the input/weights/output
    // dimensions should be, to simplify the code, the creation of primitive
    // descriptor has been divided into separate cases, based on the number
    // of input dimensions.
    size_t input_dim_num = input->dims().size();
    paddle::optional<dnnl::inner_product_forward::primitive_desc> fc_prim_desc;
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims different than 2, 3, 4."));
        break;
    }
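    // Illustrative note (not from the original source): for a 3-D input of
    // shape [batch, seq_len, channels], Create3DFcPrimDescriptor flattens it
    // to [batch * seq_len, channels, 1], so the inner product still solves an
    // ordinary 2-D (rows x input-channels) problem.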
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update weights format inside of its memory
    weights_ = Reorder(
        usr_weights_desc, usr_weights_desc, weights_->get_data_handle());

    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

    bias_ = CreateMemoryToBeCached<float>(fc_prim_desc->bias_desc(), bias);
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);

    // Store weights and bias in the mkldnn cache
    CacheWeightsAndBias(dev_ctx, ctx);

    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Return MKL-DNN primitive ready to be fed into pipeline and executed
    fc_ = inner_product_forward(*fc_prim_desc);
    this->Execute();
  }

  void Execute() {
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    if (bias_) {
      fc_->execute(astream,
                   {{DNNL_ARG_SRC, *input_},
                    {DNNL_ARG_WEIGHTS, *weights_},
                    {DNNL_ARG_BIAS, *bias_},
                    {DNNL_ARG_DST, *output_}});
    } else {
      fc_->execute(astream,
                   {{DNNL_ARG_SRC, *input_},
                    {DNNL_ARG_WEIGHTS, *weights_},
                    {DNNL_ARG_DST, *output_}});
    }
    astream.wait();
  }

 private:
  // DNNL always returns 2-dimensional data block as a result of computing
  // inner product. Hence the format 'nc' is always set for its output
  // primitive. Therefore, function SetOutputFormat is needed to choose
  // an appropriate format based on the number of input dimensions and
  // format of an input tensor.
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
      out->set_mem_desc({phi::vectorize(out->dims()),
                         platform::MKLDNNGetDataType<T_out>(),
                         out->format()});
      // In case of 3 dims, we generate a format that is based on number
      // of output dims and the layout of input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

  void UpdateDataPointers(const ExecutionContext& ctx,
                          Tensor* out,
                          const Tensor* in) {
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // If the primitive exists, but the output tensor has changed its
    // variable, update its format to what has been determined in the first
    // call to the ExecuteFcPrimitive method.
    if (out->format() == MKLDNNMemoryFormat::undef) {
      SetOutputFormat(in->format(), out);
    }
  }

  dnnl::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, MKLDNNMemoryFormat::any);
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreateFCAttrs(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
    auto dims = phi::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }
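  // Illustrative example (shapes are hypothetical): Paddle stores FC weights
  // as [input_channels, output_channels], while DNNL's inner_product expects
  // [output_channels, input_channels], so a weight tensor of shape [128, 64]
  // yields DNNL dims {64, 128} in Get2DWeightDimsForDNNL above.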

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

  dnnl::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
    auto input_dims = phi::vectorize(input->dims());
    std::vector<int64_t> new_input_dims = {
        input_dims[0] * input_dims[1], input_dims[2], 1};
    auto src_desc =
        CreateMemDescriptor<T_in>(new_input_dims, MKLDNNMemoryFormat::any);

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
    const auto attrs = CreateFCAttrs(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
    auto paddle_w_dims = phi::vectorize(weights->dims());
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

  dnnl::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, MKLDNNMemoryFormat::any);
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreateFCAttrs(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
    auto old_w_dims = phi::vectorize(weights->dims());
    auto old_in_dims = phi::vectorize(input->dims());
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
  }

  // Convert data from one data format to another
  std::shared_ptr<dnnl::memory> Reorder(const memory::desc& src_desc,
                                        const memory::desc& dst_desc,
                                        void* src_data) {
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = std::make_shared<memory>(dst_desc, engine_);

    auto reorder = dnnl::reorder(src_mem, *dst_mem);
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    {
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          2,
          platform::EventRole::kUniqueOp);
      reorder.execute(astream, src_mem, *dst_mem);
      astream.wait();
    }

    return dst_mem;
  }

  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
  std::shared_ptr<dnnl::memory> ReorderWithScale(
      const std::shared_ptr<memory> src_mem,
      const memory::desc& dst_md,
      const std::vector<float>& scale_data) {
    auto dst_mem = std::make_shared<dnnl::memory>(dst_md, engine_);
    dnnl::primitive_attr attributes;
    // According to MKL-DNN's documentation, the mask determines along which
    // dimensions the scales are applied:
    // 0 - a single scale is applied to the whole tensor.
    // 1 - a separate scale is applied to each slice along dimension 0.
    //     In case of weights quantization, that dimension is the output
    //     channel, because we perform per-output-channel quantization.
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
    auto reorder = dnnl::reorder(*src_mem, *dst_mem, attributes);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          2,
          platform::EventRole::kUniqueOp);
      reorder.execute(astream,
                      {{DNNL_ARG_FROM, *src_mem}, {DNNL_ARG_TO, *dst_mem}});
      astream.wait();
    }

    return dst_mem;
  }
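  // Illustrative example (numbers are hypothetical): when quantizing weights
  // to int8 with a single per-tensor scale of 100.0f, the reorder above maps
  // an fp32 value of 0.5f to roughly 0.5 * 100 = 50; with per-output-channel
  // scales, each output channel i is multiplied by scale_data[i] instead.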

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
    return platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), format);
  }

  template <typename T>
  static dnnl::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                MKLDNNMemoryFormat format) {
    auto dims = phi::vectorize(tensor->dims());
    return CreateMemDescriptor<T>(dims, format);
  }

  template <typename T>
  dnnl::memory CreateMemory(const dnnl::memory::desc& desc,
                            const Tensor* tensor) {
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
  }

  dnnl::memory CreateMemory(const dnnl::memory::desc& desc, void* data) {
    return memory(desc, engine_, data);
  }

  template <typename T>
  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, const Tensor* tensor) {
    return CreateMemoryToBeCached(desc,
                                  platform::to_void_cast<T>(tensor->data<T>()));
  }

  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, void* data) {
    return std::make_shared<memory>(desc, engine_, data);
  }

  // Create weights memory and transform to default MKL-DNN format
  std::shared_ptr<dnnl::memory> CreateWeightsMemory(const Tensor* weights) {
    auto dims = phi::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // Correct output dimensions
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
    // Transpose weights through MKL-DNN's reorder from io to oi format.
    return Reorder(src_desc,
                   dst_desc,
                   platform::to_void_cast<float>(weights->data<float>()));
  }

  void CacheWeightsAndBias(const MKLDNNDeviceContext& dev_ctx,
                           const ExecutionContext& ctx) {
    std::string key = platform::CreateKey(dev_ctx);
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

    const std::string weights_key = key + ctx.InputName("W");
    const std::string bias_key = key + ctx.InputName("Bias");
    dev_ctx.SetBlob(weights_key, weights_);
    dev_ctx.SetBlob(bias_key, bias_);
  }

  // Compute the bias scales so that the bias values correspond to the scale
  // of the data produced by the input and weights multiplication.
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }
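  // Illustrative example (attribute values are hypothetical): with
  // Scale_in = 2.0f and Scale_weights = {0.5f, 4.0f}, the bias scales become
  // {2.0 * 0.5, 2.0 * 4.0} = {1.0f, 8.0f}, matching the scale of the int32
  // accumulator each bias value is added to.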

  // Correct the output scale to take into account the scaling of input and
  // weights. Since the data coming out of the input and weight multiplication
  // carries their combined scales, it needs to be divided by those scales to
  // normalise it back to its floating-point range, and then multiplied by the
  // desired output scale.
  std::tuple<std::vector<float>, float> ComputeOutputShiftScale(
      const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    bool has_activation = !ctx.Attr<std::string>("activation_type").empty();
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    // If the output will be in floats, we don't multiply by scale_out.

    float scale = (!force_fp32_output && has_activation)
                      ? ctx.Attr<float>("Scale_out")
                      : 1.0f;
    float inner_scale = (force_fp32_output || has_activation)
                            ? 1.0f
                            : ctx.Attr<float>("Scale_out");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        output_shift_scale[i] = inner_scale;
      else
        output_shift_scale[i] =
            inner_scale / (scale_in_data * scale_weights_data[i]);
    }

    return make_tuple(output_shift_scale, scale);
  }
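  // Illustrative example (attribute values are hypothetical): with
  // Scale_in = 2.0f, Scale_weights = {4.0f}, Scale_out = 16.0f and no fused
  // activation, inner_scale = 16 and output_shift_scale[0] = 16 / (2 * 4) =
  // 2.0f while the returned eltwise scale stays 1.0f; with an activation
  // fused, the roles flip: output_shift_scale[0] = 1 / 8 and the activation
  // post-op applies the 16.0f.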

  // Computing MKL-DNN's scaling mask which determines along which dimension
  // slice the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }
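  // Illustrative example: CreateMask(0, true) returns 1 << 0 = 1 (one scale
  // per slice along dimension 0), CreateMask(1, true) returns 1 << 1 = 2
  // (per-slice scales along dimension 1), and any call with
  // is_multi_channel_quantized == false returns 0 (a single tensor-wide
  // scale).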

  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
    weights_ = ReorderWithScale(
        weights_, dst, ctx.Attr<std::vector<float>>("Scale_weights"));
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
    bias_ = ReorderWithScale(bias_, fc_prim_desc.bias_desc(), bias_scales);
  }

  dnnl::primitive_attr CreateFCAttrs(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;

    std::vector<float> output_shift_scale;
    float scale;
    std::tie(output_shift_scale, scale) = ComputeOutputShiftScale(ctx);
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);

    float sum_scale = 1.0f;
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      post_operations.append_sum(sum_scale);
    }

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_relu, negative_slope, placeholder);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu_tanh, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_erf") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu_erf, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_tanh, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "sigmoid") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_logistic, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "mish") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_mish, alpha, beta);
    } else if (ctx.Attr<std::string>("activation_type") == "hard_swish") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_hardswish, alpha, beta);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }
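  // Illustrative note (not from the original source): fusing e.g. relu as a
  // post-op means the primitive applies the activation to the inner product
  // result in the same pass, so no separate activation kernel is launched,
  // and the eltwise scale above folds the output quantization into that pass.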
  dnnl::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const dnnl::memory::desc& input_desc,
      const dnnl::memory::desc& weights_desc,
      const dnnl::memory::desc& bias_desc,
      const dnnl::memory::desc& dst_desc,
      const dnnl::primitive_attr& attrs) {
    auto fc_desc = inner_product_forward::desc(prop_kind::forward_scoring,
                                               input_desc,
                                               weights_desc,
                                               bias_desc,
                                               dst_desc);

    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
  }

  // Create output memory based on the output tensor and the format the
  // inner_product primitive descriptor chose for its output
  dnnl::memory CreateDstMemory(
      const dnnl::inner_product_forward::primitive_desc& fc_prim_desc,
      const ExecutionContext& ctx,
      Tensor* output) {
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      auto* residual_param = ctx.Output<Tensor>("ResidualData");

      PADDLE_ENFORCE_EQ(
          output->dims(),
          residual_param->dims(),
          platform::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, but got output's dimension = %d"
              " and residual param's dimension = %d.",
              output->dims().size(),
              residual_param->dims().size()));

      output->ShareDataWith(*residual_param);
    }

    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);

    return dst_mem;
  }

  void RecomputeOutputDims(const ExecutionContext& ctx,
                           const LoDTensor* input,
                           const Tensor* w,
                           LoDTensor* output) {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights,
                      false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(input->dims(),
                 w->dims(),
                 output_dims,
                 in_num_col_dims,
                 padding_weights);
    output->Resize(phi::make_ddim(output_dims));
    output->set_lod(input->lod());
  }

 private:
  const dnnl::engine& engine_;
  paddle::optional<memory> input_;
  paddle::optional<memory> output_;
  std::shared_ptr<memory> bias_;
  std::shared_ptr<memory> weights_;
  paddle::optional<inner_product_forward> fc_;
};

// Attempt to fetch a cached primitive factory based on the provided
// parameters of input format, weight dimensions and output name.
// If not cached, create a new one.
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
                    const std::string& key) {
  auto prim_creator =
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
  if (prim_creator == nullptr) {
    prim_creator = std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(
        dev_ctx.GetEngine());
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

// Choose the appropriate primitive factory implementation based on the
// inferred output type (uint8, int8, bfloat16 or float).
template <typename T_in, typename T_w>
static void ExecuteFc(const ExecutionContext& ctx,
                      const LoDTensor* input,
                      const Tensor* w,
                      const Tensor* bias,
                      LoDTensor* output,
                      bool fuse_relu,
                      bool force_fp32_output) {
  auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
  std::string prim_key = platform::CreateKey(dev_ctx,
                                             input->format(),
                                             input->dims()[0],
                                             phi::vectorize<int>(w->dims()),
                                             ctx.OutputName("Out"));
  prim_key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, prim_key);

  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
  bool is_bfloat16 = std::is_same<T_in, paddle::platform::bfloat16>::value;
  if ((!is_int8 && !is_bfloat16) || force_fp32_output) {
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (is_bfloat16) {
    GetPrimitiveFactory<T_in, T_w, platform::bfloat16>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (fuse_relu) {
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else {
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  }
}
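// Illustrative summary of the dispatch above (derived from its branches):
//   fp32 input, or force_fp32_output set -> float output
//   bf16 input                           -> bfloat16 output
//   int8/uint8 input with fused relu     -> uint8 output (relu output is
//                                           non-negative)
//   int8/uint8 input without fused relu  -> int8 output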

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()),
        true,
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
    platform::MKLDNNDeviceContext::tls().log_lib_version();
    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
    auto bias = ctx.Input<Tensor>("Bias");
    auto output = ctx.Output<LoDTensor>("Out");

    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    ExecuteFc<T_in, T_w>(
        ctx, input, w, bias, output, fuse_relu, force_fp32_output);

    output->set_layout(DataLayout::kMKLDNN);
  }
};
}  // namespace operators
}  // namespace paddle

// Weights of FC are by default stored as fp32; the weight data type template
// argument indicates their destination data type, i.e. the type that is
// eventually used during the kernel's computations.
namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    FP32,
                                    ops::kFCMKLDNNFP32,
                                    ops::FCMKLDNNOpKernel<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
    fc,
    MKLDNN,
    ::paddle::platform::CPUPlace,
    BF16,
    ops::kFCMKLDNNFP32,
    ops::FCMKLDNNOpKernel<paddle::platform::bfloat16,
                          paddle::platform::bfloat16>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    U8,
                                    ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    S8,
                                    ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);