fc_mkldnn_op.cc 28.9 KB
Newer Older
M
mozga-intel 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

15
#include <memory>
W
wanghuancoder 已提交
16

17
#include "paddle/fluid/operators/fc_op.h"
M
mozga-intel 已提交
18
#include "paddle/fluid/platform/mkldnn_helper.h"
W
wanghuancoder 已提交
19

20
namespace phi {
21
class DenseTensor;
22
}  // namespace phi
23

W
wanghuancoder 已提交
24
namespace paddle {
25
namespace framework {}  // namespace framework
W
wanghuancoder 已提交
26 27 28 29
namespace platform {
class MKLDNNDeviceContext;
}  // namespace platform
}  // namespace paddle
M
mozga-intel 已提交
30 31 32 33

namespace paddle {
namespace operators {

34 35 36 37 38
using dnnl::inner_product_forward;
using dnnl::memory;
using dnnl::primitive;
using dnnl::prop_kind;
using dnnl::stream;
39 40 41
using framework::DataLayout;
using framework::DDim;
using framework::ExecutionContext;
42 43 44
using framework::LoDTensor;
using framework::Tensor;
using platform::GetMKLDNNFormat;
45 46
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
M
mozga-intel 已提交
47

M
Michał Gallus 已提交
48
template <typename T_in, typename T_w, typename T_out>
49
class FCPrimitiveFactory {
M
mozga-intel 已提交
50
 public:
51
  explicit FCPrimitiveFactory(const dnnl::engine& engine) : engine_(engine) {}
52

53 54 55 56
  void ExecuteFcPrimitive(const LoDTensor* input,
                          const Tensor* weights,
                          const Tensor* bias,
                          LoDTensor* output,
57
                          const MKLDNNDeviceContext& dev_ctx,
A
Adam 已提交
58
                          const ExecutionContext& ctx) {
59
    RecomputeOutputDims(ctx, input, weights, output);
M
Michał Gallus 已提交
60 61
    // If primitive has already been created and cached, don't create new one,
    // but update input and output data pointers and return it.
62 63
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
A
Adam 已提交
64 65
      this->Execute();
      return;
66
    }  // Otherwise, create a new one.
M
mozga-intel 已提交
67

68
    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
T
tianshuo78520a 已提交
69
    PADDLE_ENFORCE_LE(
70 71
        in_col_dims,
        2,
T
tianshuo78520a 已提交
72 73 74 75
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than "
            "2."));
76 77
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
78 79
          input->dims().size(),
          3,
80 81 82 83
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
84 85
          input->format(),
          MKLDNNMemoryFormat::ncw,
86 87 88 89 90
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

91 92
    weights_ = CreateWeightsMemory(weights);

93 94 95 96 97
    // Since MKL-DNN has a lot of limitations on what the input/weights/output
    // dimensions should be, to simplify the code, the creation of primitive
    // descriptor has been divided into separate cases, based on the number
    // of input dimensions.
    size_t input_dim_num = input->dims().size();
98
    paddle::optional<dnnl::inner_product_forward::primitive_desc> fc_prim_desc;
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims different than 2, 3, 4."));
        break;
120
    }
121 122
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update weights format inside of its memory
123 124
    weights_ = Reorder(
        usr_weights_desc, usr_weights_desc, weights_->get_data_handle());
125

126 127 128
    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

129
    bias_ = CreateMemoryToBeCached<float>(fc_prim_desc->bias_desc(), bias);
130 131
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);
M
mozga-intel 已提交
132

133 134 135
    // Store weights and bias in the mkldnn cache
    CacheWeightsAndBias(dev_ctx, ctx);

136 137 138 139 140 141
    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Return MKL-DNN primitive ready to be fed into pipeline and executed
    fc_ = inner_product_forward(*fc_prim_desc);
A
Adam 已提交
142 143 144 145
    this->Execute();
  }

  void Execute() {
146
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
A
Adam 已提交
147
    if (bias_) {
148 149 150 151 152
      fc_->execute(astream,
                   {{DNNL_ARG_SRC, *input_},
                    {DNNL_ARG_WEIGHTS, *weights_},
                    {DNNL_ARG_BIAS, *bias_},
                    {DNNL_ARG_DST, *output_}});
A
Adam 已提交
153
    } else {
154 155 156 157
      fc_->execute(astream,
                   {{DNNL_ARG_SRC, *input_},
                    {DNNL_ARG_WEIGHTS, *weights_},
                    {DNNL_ARG_DST, *output_}});
A
Adam 已提交
158 159
    }
    astream.wait();
M
mozga-intel 已提交
160 161
  }

162
 private:
163 164 165 166 167 168 169 170 171 172
  // DNNL always returns 2-dimensional data block as a result of computing
  // inner product. Hence the format 'nc' is always set for its output
  // primitive. Therefore, function SetOutputFormat is needed to choose
  // an appropriate format based on the number of input dimensions and
  // format of an input tensor.
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
P
piotrekobi 已提交
173 174 175
      out->set_mem_desc({phi::vectorize(out->dims()),
                         platform::MKLDNNGetDataType<T_out>(),
                         out->format()});
176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192
      // In case of 3 dims, we generate a format that is based on number
      // of output dims and the layout of input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

193 194
  void UpdateDataPointers(const ExecutionContext& ctx,
                          Tensor* out,
195
                          const Tensor* in) {
M
Michał Gallus 已提交
196 197 198 199 200
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // If the primitive exists, but the output tensor has changed its
    // variable, update its format to what has been determined in first
    // call to CreateFcPrimitive method.
A
Adam 已提交
201
    if (out->format() == MKLDNNMemoryFormat::undef) {
202
      SetOutputFormat(in->format(), out);
203
    }
M
mozga-intel 已提交
204 205
  }

206
  dnnl::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
207 208 209 210 211
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
212
    auto src_desc = CreateMemDescriptor<T_in>(input, MKLDNNMemoryFormat::any);
213 214 215 216 217
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
218
    const auto attrs = CreateFCAttrs(ctx);
219 220 221 222
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
223
    auto dims = phi::vectorize(weights->dims());
224 225 226 227 228 229
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

230
  dnnl::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
231 232 233 234 235
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
236
    auto input_dims = phi::vectorize(input->dims());
237 238
    std::vector<int64_t> new_input_dims = {
        input_dims[0] * input_dims[1], input_dims[2], 1};
239 240
    auto src_desc =
        CreateMemDescriptor<T_in>(new_input_dims, MKLDNNMemoryFormat::any);
241 242 243 244 245 246 247 248 249 250

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
251
    const auto attrs = CreateFCAttrs(ctx);
252 253 254 255
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
256
    auto paddle_w_dims = phi::vectorize(weights->dims());
257
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
258 259 260 261 262 263 264
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

265
  dnnl::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
266 267 268 269 270
      const LoDTensor* input,
      const Tensor* weights,
      const Tensor* bias,
      LoDTensor* output,
      const ExecutionContext& ctx) {
271
    auto src_desc = CreateMemDescriptor<T_in>(input, MKLDNNMemoryFormat::any);
272 273 274 275 276 277 278
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
279
    const auto attrs = CreateFCAttrs(ctx);
280 281 282 283 284
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
285 286
    auto old_w_dims = phi::vectorize(weights->dims());
    auto old_in_dims = phi::vectorize(input->dims());
287 288 289 290 291 292 293 294
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
M
mozga-intel 已提交
295 296
  }

M
Michał Gallus 已提交
297
  // Convert data from one data format to another
298 299 300
  std::shared_ptr<dnnl::memory> Reorder(const memory::desc& src_desc,
                                        const memory::desc& dst_desc,
                                        void* src_data) {
A
Adam 已提交
301
    auto src_mem = memory(src_desc, engine_, src_data);
302
    auto dst_mem = std::make_shared<memory>(dst_desc, engine_);
M
mozga-intel 已提交
303

304
    auto reorder = dnnl::reorder(src_mem, *dst_mem);
305
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
306 307

    {
C
chenjian 已提交
308
      platform::RecordEvent record_reorder(
309 310 311
          "int_reorder",
          platform::TracerEventType::UserDefined,
          2,
C
chenjian 已提交
312
          platform::EventRole::kUniqueOp);
313 314 315
      reorder.execute(astream, src_mem, *dst_mem);
      astream.wait();
    }
M
mozga-intel 已提交
316

317
    return dst_mem;
M
mozga-intel 已提交
318 319
  }

M
Michał Gallus 已提交
320 321
  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
322
  std::shared_ptr<dnnl::memory> ReorderWithScale(
323 324
      const std::shared_ptr<memory> src_mem,
      const memory::desc& dst_md,
325
      const std::vector<float>& scale_data) {
326 327
    auto dst_mem = std::make_shared<dnnl::memory>(dst_md, engine_);
    dnnl::primitive_attr attributes;
M
Michał Gallus 已提交
328 329 330 331 332 333 334 335
    // According to MKL-DNN's documentation mask determines along which
    // dimensions should the scale be applied.
    // 0 - Single scale applied to whole tensor
    // 1 - Apply Scale along a slice of each dimension which index is 1.
    //     In case of weights quantization, that dimension is output,
    //     becuase we perform per-output-channel quantization
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
336
    auto reorder = dnnl::reorder(*src_mem, *dst_mem, attributes);
M
Michał Gallus 已提交
337

338
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
339
    {
C
chenjian 已提交
340
      platform::RecordEvent record_reorder(
341 342 343
          "int_reorder",
          platform::TracerEventType::UserDefined,
          2,
C
chenjian 已提交
344
          platform::EventRole::kUniqueOp);
345
      reorder.execute(astream,
346
                      {{DNNL_ARG_FROM, *src_mem}, {DNNL_ARG_TO, *dst_mem}});
347 348
      astream.wait();
    }
M
Michał Gallus 已提交
349 350 351 352 353

    return dst_mem;
  }

  template <typename T>
354
  static dnnl::memory::desc CreateMemDescriptor(
A
Adam 已提交
355
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
356 357
    return platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), format);
M
mozga-intel 已提交
358 359
  }

M
Michał Gallus 已提交
360
  template <typename T>
361 362
  static dnnl::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                MKLDNNMemoryFormat format) {
363
    auto dims = phi::vectorize(tensor->dims());
M
Michał Gallus 已提交
364
    return CreateMemDescriptor<T>(dims, format);
M
mozga-intel 已提交
365 366
  }

M
Michał Gallus 已提交
367
  template <typename T>
368 369
  dnnl::memory CreateMemory(const dnnl::memory::desc& desc,
                            const Tensor* tensor) {
A
Adam 已提交
370
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
M
mozga-intel 已提交
371 372
  }

373
  dnnl::memory CreateMemory(const dnnl::memory::desc& desc, void* data) {
A
Adam 已提交
374
    return memory(desc, engine_, data);
M
mozga-intel 已提交
375 376
  }

377
  template <typename T>
378 379
  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, const Tensor* tensor) {
380 381 382 383
    return CreateMemoryToBeCached(desc,
                                  platform::to_void_cast<T>(tensor->data<T>()));
  }

384 385
  std::shared_ptr<dnnl::memory> CreateMemoryToBeCached(
      const dnnl::memory::desc& desc, void* data) {
386 387 388 389
    return std::make_shared<memory>(desc, engine_, data);
  }

  // Create weights memory and transform to default MKL-DNN format
390
  std::shared_ptr<dnnl::memory> CreateWeightsMemory(const Tensor* weights) {
391
    auto dims = phi::vectorize(weights->dims());
392
    std::swap(dims[0], dims[1]);  // Correct output dimensions
M
Michał Gallus 已提交
393 394
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
395
    // Transpose weights through MKL-DNN's reorder from io to oi format.
396 397
    return Reorder(src_desc,
                   dst_desc,
A
Adam 已提交
398
                   platform::to_void_cast<float>(weights->data<float>()));
M
Michał Gallus 已提交
399 400
  }

401 402
  void CacheWeightsAndBias(const MKLDNNDeviceContext& dev_ctx,
                           const ExecutionContext& ctx) {
403 404 405
    std::string key = platform::CreateKey(dev_ctx);
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

406 407 408 409 410 411
    const std::string weights_key = key + ctx.InputName("W");
    const std::string bias_key = key + ctx.InputName("Bias");
    dev_ctx.SetBlob(weights_key, weights_);
    dev_ctx.SetBlob(bias_key, bias_);
  }

M
Michał Gallus 已提交
412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435
  // Compute the bias scales so that its values correspond to the
  // scale of data being an output of weights and input multiplication
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }

  // Correct output scale, to take into account scaling of input and weights
  // Since the data that comes out of input and weight multiplication is
  // scaled with its own scales, this data needs to be divided by
  // those scales to normalise them back to what their floating-point range
  // was. Then we multiply them by desired output scale we want on the output.
436 437
  std::tuple<std::vector<float>, float> ComputeOutputShiftScale(
      const ExecutionContext& ctx) {
M
Michał Gallus 已提交
438 439
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
440 441
    bool has_activation = !ctx.Attr<std::string>("activation_type").empty();
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
442

M
Michał Gallus 已提交
443
    // If the output will be in floats, we don't multiply by scale_out.
444

445 446 447 448 449 450
    float scale = (!force_fp32_output && has_activation)
                      ? ctx.Attr<float>("Scale_out")
                      : 1.0f;
    float inner_scale = (force_fp32_output || has_activation)
                            ? 1.0f
                            : ctx.Attr<float>("Scale_out");
M
Michał Gallus 已提交
451 452 453 454 455 456
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
457
        output_shift_scale[i] = inner_scale;
M
Michał Gallus 已提交
458 459
      else
        output_shift_scale[i] =
460
            inner_scale / (scale_in_data * scale_weights_data[i]);
M
Michał Gallus 已提交
461 462
    }

463
    return make_tuple(output_shift_scale, scale);
M
Michał Gallus 已提交
464 465 466 467 468 469 470 471 472 473
  }

  // Computing MKL-DNN's scaling mask which determines along which dimension
  // slice should the scaling be applied. For more data plase refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantizied) {
    return is_multi_channel_quantizied ? 1 << slice_dimension : 0;
  }

474
  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
475 476
    weights_ = ReorderWithScale(
        weights_, dst, ctx.Attr<std::vector<float>>("Scale_weights"));
M
Michał Gallus 已提交
477 478 479 480 481
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
482
    bias_ = ReorderWithScale(bias_, fc_prim_desc.bias_desc(), bias_scales);
M
Michał Gallus 已提交
483 484
  }

485
  dnnl::primitive_attr CreateFCAttrs(const ExecutionContext& ctx) {
486 487
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;
M
Michał Gallus 已提交
488

489 490 491
    std::vector<float> output_shift_scale;
    float scale;
    std::tie(output_shift_scale, scale) = ComputeOutputShiftScale(ctx);
M
Michał Gallus 已提交
492 493
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);
494

495
    float sum_scale = 1.0f;
496 497 498 499
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      post_operations.append_sum(sum_scale);
    }
M
Michał Gallus 已提交
500 501 502 503

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
504 505
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_relu, negative_slope, placeholder);
506 507 508
    } else if (ctx.Attr<std::string>("activation_type") == "gelu") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
509 510
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu, alpha, beta);
511 512 513
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
514 515
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu_tanh, alpha, beta);
516 517 518
    } else if (ctx.Attr<std::string>("activation_type") == "gelu_erf") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
519 520
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_gelu_erf, alpha, beta);
521 522 523
    } else if (ctx.Attr<std::string>("activation_type") == "tanh") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
524 525
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_tanh, alpha, beta);
526 527 528
    } else if (ctx.Attr<std::string>("activation_type") == "sigmoid") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
529 530
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_logistic, alpha, beta);
531 532 533
    } else if (ctx.Attr<std::string>("activation_type") == "mish") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
534 535
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_mish, alpha, beta);
J
jakpiase 已提交
536 537 538
    } else if (ctx.Attr<std::string>("activation_type") == "hard_swish") {
      constexpr float alpha = 0.0f;
      constexpr float beta = 0.0f;
539 540
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_hardswish, alpha, beta);
M
Michał Gallus 已提交
541 542 543 544
    }

    attributes.set_post_ops(post_operations);
    return attributes;
545
  }
M
mozga-intel 已提交
546

547 548 549
  dnnl::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const dnnl::memory::desc& input_desc,
      const dnnl::memory::desc& weights_desc,
550 551
      const dnnl::memory::desc& bias_desc,
      const dnnl::memory::desc& dst_desc,
552
      const dnnl::primitive_attr& attrs) {
553 554 555 556 557
    auto fc_desc = inner_product_forward::desc(prop_kind::forward_scoring,
                                               input_desc,
                                               weights_desc,
                                               bias_desc,
                                               dst_desc);
M
mozga-intel 已提交
558

M
Michał Gallus 已提交
559
    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
560
  }
M
mozga-intel 已提交
561

M
Michał Gallus 已提交
562 563
  // Create output memory based on output tensor and inner_product
  // primitive descriptor format chosen for output
564 565
  dnnl::memory CreateDstMemory(
      const dnnl::inner_product_forward::primitive_desc& fc_prim_desc,
566 567
      const ExecutionContext& ctx,
      Tensor* output) {
568 569 570 571 572
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      auto* residual_param = ctx.Output<Tensor>("ResidualData");

      PADDLE_ENFORCE_EQ(
573 574
          output->dims(),
          residual_param->dims(),
575 576 577 578
          platform::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, but got output's dimension = %d"
              " and residual param's dimension =%d .",
579 580
              output->dims().size(),
              residual_param->dims().size()));
581 582 583 584

      output->ShareDataWith(*residual_param);
    }

A
Adam 已提交
585 586
    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
M
Michał Gallus 已提交
587 588
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
A
Adam 已提交
589
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
590
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);
591

A
Adam 已提交
592
    return dst_mem;
593
  }
M
mozga-intel 已提交
594

595 596 597 598
  void RecomputeOutputDims(const ExecutionContext& ctx,
                           const LoDTensor* input,
                           const Tensor* w,
                           LoDTensor* output) {
L
luotao1 已提交
599
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
600
    bool padding_weights = ctx.Attr<bool>("padding_weights");
601 602
    PADDLE_ENFORCE_EQ(padding_weights,
                      false,
603 604
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
L
luotao1 已提交
605
    std::vector<int64_t> output_dims;
606 607 608 609
    FCOutputSize(input->dims(),
                 w->dims(),
                 output_dims,
                 in_num_col_dims,
610
                 padding_weights);
611
    output->Resize(phi::make_ddim(output_dims));
L
luotao1 已提交
612
    output->set_lod(input->lod());
613
  }
L
luotao1 已提交
614

615
 private:
616
  const dnnl::engine& engine_;
617 618
  paddle::optional<memory> input_;
  paddle::optional<memory> output_;
619 620
  std::shared_ptr<memory> bias_;
  std::shared_ptr<memory> weights_;
621
  paddle::optional<inner_product_forward> fc_;
622
};
M
mozga-intel 已提交
623

M
Michał Gallus 已提交
624 625 626 627 628 629
// Attempt to fetch cached primitive factory based on provided parameters
// of input format, weight dimensions and output name.
// If not cached, create a new one.
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
630
                    const std::string& key) {
631
  auto prim_creator =
M
Michał Gallus 已提交
632 633
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
634
  if (prim_creator == nullptr) {
635 636
    prim_creator = std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(
        dev_ctx.GetEngine());
637
    dev_ctx.SetBlob(key, prim_creator);
M
mozga-intel 已提交
638 639
  }

640 641
  return prim_creator;
}
M
mozga-intel 已提交
642

M
Michał Gallus 已提交
643 644 645
// Choose appropriate primitive factory implementation based on inferred
// output type (uint8, int8 or float).
template <typename T_in, typename T_w>
646 647 648 649 650 651 652
static void ExecuteFc(const ExecutionContext& ctx,
                      const LoDTensor* input,
                      const Tensor* w,
                      const Tensor* bias,
                      LoDTensor* output,
                      bool fuse_relu,
                      bool force_fp32_output) {
653
  auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
654 655 656 657 658
  std::string prim_key = platform::CreateKey(dev_ctx,
                                             input->format(),
                                             input->dims()[0],
                                             phi::vectorize<int>(w->dims()),
                                             ctx.OutputName("Out"));
659 660
  prim_key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, prim_key);

M
Michał Gallus 已提交
661 662
  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
663 664
  bool is_bfloat16 = std::is_same<T_in, paddle::platform::bfloat16>::value;
  if ((!is_int8 && !is_bfloat16) || force_fp32_output) {
665 666
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
667 668 669
  } else if (is_bfloat16) {
    GetPrimitiveFactory<T_in, T_w, platform::bfloat16>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
M
Michał Gallus 已提交
670
  } else if (fuse_relu) {
671 672
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
M
Michał Gallus 已提交
673
  } else {
674 675
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
M
Michał Gallus 已提交
676 677 678 679 680
  }
}

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
M
mozga-intel 已提交
681 682
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
M
Michał Gallus 已提交
683
    PADDLE_ENFORCE_EQ(
684 685
        platform::is_cpu_place(ctx.GetPlace()),
        true,
M
Michał Gallus 已提交
686
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
687
    platform::MKLDNNDeviceContext::tls().log_lib_version();
688 689
    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
T
tensor-tang 已提交
690
    auto bias = ctx.Input<Tensor>("Bias");
691
    auto output = ctx.Output<LoDTensor>("Out");
M
mozga-intel 已提交
692

M
Michał Gallus 已提交
693 694 695
    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

696 697
    ExecuteFc<T_in, T_w>(
        ctx, input, w, bias, output, fuse_relu, force_fp32_output);
M
mozga-intel 已提交
698

699
    output->set_layout(DataLayout::kMKLDNN);
M
mozga-intel 已提交
700 701 702 703 704
  }
};
}  // namespace operators
}  // namespace paddle

M
Michał Gallus 已提交
705 706 707 708
// Weights of FC are by default stored using fp32, template argument of weight
// data type implies their destination data type. (What's eventually going to
// be used during computations of kernel).
namespace ops = paddle::operators;
709 710 711 712 713
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    FP32,
                                    ops::kFCMKLDNNFP32,
M
Michał Gallus 已提交
714 715
                                    ops::FCMKLDNNOpKernel<float, float>);

716
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
717 718 719 720 721
    fc,
    MKLDNN,
    ::paddle::platform::CPUPlace,
    BF16,
    ops::kFCMKLDNNFP32,
722 723 724
    ops::FCMKLDNNOpKernel<paddle::platform::bfloat16,
                          paddle::platform::bfloat16>);

725 726 727 728 729
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    U8,
                                    ops::kFCMKLDNNINT8,
M
Michał Gallus 已提交
730 731
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

732 733 734 735 736
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc,
                                    MKLDNN,
                                    ::paddle::platform::CPUPlace,
                                    S8,
                                    ops::kFCMKLDNNINT8,
M
Michał Gallus 已提交
737
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);