/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <mkldnn/include/mkldnn_types.h>
#include <memory>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/variant.h"

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::Tensor;
using framework::LoDTensor;
using framework::DDim;
using framework::ExecutionContext;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
using platform::GetMKLDNNFormat;
using mkldnn::memory;
using mkldnn::inner_product_forward;
using mkldnn::primitive;
using mkldnn::stream;
using mkldnn::prop_kind;

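// Creates, caches and executes a single MKL-DNN inner_product primitive
// that implements the FC (fully connected) operator, templated on the
// input, weight and output data types to cover the fp32 and int8 paths.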
template <typename T_in, typename T_w, typename T_out>
class FCPrimitiveFactory {
 public:
  explicit FCPrimitiveFactory(const mkldnn::engine& engine) : engine_(engine) {}

  void ExecuteFcPrimitive(const LoDTensor* input, const Tensor* weights,
                          const Tensor* bias, LoDTensor* output,
                          const MKLDNNDeviceContext& dev_ctx,
                          const ExecutionContext& ctx) {
    RecomputeOutputDims(ctx, input, weights, output);
    // If the primitive has already been created and cached, don't create a
    // new one; just update the input and output data pointers and execute it.
    if (fc_) {
      UpdateDataPointers(ctx, output, input);
      this->Execute();
      return;
    }  // Otherwise, create a new one.

    auto in_col_dims = ctx.Attr<int>("in_num_col_dims");
    PADDLE_ENFORCE_LE(
        in_col_dims, 2,
        platform::errors::Unimplemented(
            "DNNL FC doesn't support in_num_col_dims parameter to "
            "be higher than 2."));
    if (in_col_dims == 2) {
      PADDLE_ENFORCE_EQ(
          input->dims().size(), 3,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "3 dim input is provided."));
      PADDLE_ENFORCE_EQ(
          input->format(), MKLDNNMemoryFormat::ncw,
          platform::errors::Unimplemented(
              "DNNL FC only supports in_num_col_dims equal to 2 when "
              "input format is equal to ncw."));
    }

    weights_ = CreateWeightsMemory(weights);

    // Since MKL-DNN has a lot of limitations on what the input/weights/output
    // dimensions should be, to simplify the code, the creation of primitive
    // descriptor has been divided into separate cases, based on the number
    // of input dimensions.
    size_t input_dim_num = input->dims().size();
    boost::optional<mkldnn::inner_product_forward::primitive_desc> fc_prim_desc;
    memory::desc usr_weights_desc = {};
    switch (input_dim_num) {
      case 2:
        fc_prim_desc =
            Create2DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create2DUserWeightsDesc();
        break;
      case 3:
        fc_prim_desc =
            Create3DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create3DUserWeightsDesc(weights);
        break;
      case 4:
        fc_prim_desc =
            Create4DFcPrimDescriptor(input, weights, bias, output, ctx);
        usr_weights_desc = Create4DUserWeightsDesc(input, weights);
        break;
      default:
        PADDLE_THROW(platform::errors::Unimplemented(
            "DNNL FC doesn't support input dims other than 2, 3 and 4."));
        break;
    }
    input_ = CreateMemory<T_in>(fc_prim_desc->src_desc(), input);
    // Update the weights' format inside their memory object
    weights_ = Reorder(usr_weights_desc, usr_weights_desc,
                       weights_->get_data_handle());

    // Quantize weights and reorder to format chosen by FC primitive descriptor.
    QuantizeWeights(ctx, fc_prim_desc->weights_desc());

    bias_ = CreateMemoryToBeCached<float>(fc_prim_desc->bias_desc(), bias);
    // If int8 is desired, quantize bias into 32-bit signed int
    QuantizeBias(*fc_prim_desc, ctx);

    // Store weights and bias in the mkldnn cache
    CacheWeightsAndBias(dev_ctx, ctx);

    // Based on format determined by inner_product, create output in desired
    // memory format
    output_ = CreateDstMemory(*fc_prim_desc, ctx, output);

    // Return MKL-DNN primitive ready to be fed into pipeline and executed
    fc_ = inner_product_forward(*fc_prim_desc);
    this->Execute();
  }

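  // Submit the cached inner_product primitive to a stream with the
  // currently attached src/weights/(optional bias)/dst memories and wait
  // for it to finish.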
  void Execute() {
    mkldnn::stream astream(engine_);
    if (bias_) {
      fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
                             {MKLDNN_ARG_WEIGHTS, *weights_},
                             {MKLDNN_ARG_BIAS, *bias_},
                             {MKLDNN_ARG_DST, *output_}});
    } else {
      fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
                             {MKLDNN_ARG_WEIGHTS, *weights_},
                             {MKLDNN_ARG_DST, *output_}});
    }
    astream.wait();
  }

 private:
  // DNNL always returns a 2-dimensional data block as the result of
  // computing an inner product. Hence, the 'nc' format is always set on its
  // output primitive, and SetOutputFormat is needed to choose an
  // appropriate format based on the number of output dimensions and the
  // format of the input tensor.
  void SetOutputFormat(MKLDNNMemoryFormat in_format, Tensor* out) {
    int dim_num = out->dims().size();
    // In case of 2 dims, we set the only possible format, nc
    if (dim_num == 2) {
      out->set_format(MKLDNNMemoryFormat::nc);
      // In case of 3 dims, we generate a format that is based on number
      // of output dims and the layout of input format (nchw or nhwc).
    } else if (dim_num == 3) {
      if (in_format == MKLDNNMemoryFormat::nwc ||
          in_format == MKLDNNMemoryFormat::nhwc) {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nhwc));
      } else {
        out->set_format(
            platform::MKLDNNFormatForSize(dim_num, MKLDNNMemoryFormat::nchw));
      }
      // In any other case we overwrite the output format with the input one.
    } else {
      out->set_format(in_format);
    }
  }

  void UpdateDataPointers(const ExecutionContext& ctx, Tensor* out,
                          const Tensor* in) {
    input_->set_data_handle(to_void_cast(in->data<T_in>()));
    output_->set_data_handle(out->mutable_data<T_out>(ctx.GetPlace()));
    // The primitive already exists, but the output tensor's underlying
    // variable may have changed, so update its format to the one determined
    // in the first call to ExecuteFcPrimitive.
    if (out->format() == MKLDNNMemoryFormat::undef) {
      SetOutputFormat(in->format(), out);
    }
  }

  mkldnn::inner_product_forward::primitive_desc Create2DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    auto weight_dims = Get2DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get2DWeightDimsForDNNL(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // swap input dim with output dim
    return dims;
  }

  memory::desc Create2DUserWeightsDesc() { return weights_->get_desc(); }

  mkldnn::inner_product_forward::primitive_desc Create3DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto input_dims = framework::vectorize(input->dims());
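    // Flatten the (batch, seq, feature) input into batch * seq rows of
    // features with a trailing singleton spatial dimension, so that the 3D
    // inner_product treats every row as a separate sample.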
    std::vector<int64_t> new_input_dims = {input_dims[0] * input_dims[1],
                                           input_dims[2], 1};
    auto src_desc = CreateMemDescriptor<T_in>(new_input_dims, input->format());

    auto weight_dims = Get3DWeightDimsForDNNL(weights);
    auto weights_desc =
        CreateMemDescriptor<T_w>(weight_dims, MKLDNNMemoryFormat::any);

    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);

    auto dst_dims = {input_dims[0] * input_dims[1], weight_dims[0]};
    auto dst_desc =
        CreateMemDescriptor<T_out>(dst_dims, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

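  // Paddle keeps FC weights in (in, out) order, while DNNL's 3D
  // inner_product expects them as (out, in, 1) in oiw layout.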
  std::vector<int64_t> Get3DWeightDimsForDNNL(const Tensor* weights) {
    auto paddle_w_dims = framework::vectorize(weights->dims());
    return {paddle_w_dims[1], paddle_w_dims[0], 1};
  }

  memory::desc Create3DUserWeightsDesc(const Tensor* weights) {
    auto dims = Get3DWeightDimsForDNNL(weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oiw);
  }

  mkldnn::inner_product_forward::primitive_desc Create4DFcPrimDescriptor(
      const LoDTensor* input, const Tensor* weights, const Tensor* bias,
      LoDTensor* output, const ExecutionContext& ctx) {
    auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
    // Since MKL-DNN doesn't support 4D column-major data formats in
    // inner_product primitive, transpose the weights to be in
    // row-major format
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    auto weights_desc = CreateMemDescriptor<T_w>(dims, MKLDNNMemoryFormat::any);
    auto bias_desc = CreateMemDescriptor<float>(bias, MKLDNNMemoryFormat::x);
    auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
    const auto attrs = CreatePostOps(ctx);
    return CreateFcPrimDesc(src_desc, weights_desc, bias_desc, dst_desc, attrs);
  }

  std::vector<int64_t> Get4DWeightDimsForDNNL(const LoDTensor* input,
                                              const Tensor* weights) {
    auto old_w_dims = framework::vectorize(weights->dims());
    auto old_in_dims = framework::vectorize(input->dims());
    auto dims = {old_w_dims[1], old_in_dims[1], old_in_dims[2], old_in_dims[3]};
    return dims;
  }

  memory::desc Create4DUserWeightsDesc(const LoDTensor* input,
                                       const Tensor* weights) {
    auto dims = Get4DWeightDimsForDNNL(input, weights);
    return CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oihw);
  }

  // Convert data from one data format to another
  std::shared_ptr<mkldnn::memory> Reorder(const memory::desc& src_desc,
                                          const memory::desc& dst_desc,
                                          void* src_data) {
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = std::make_shared<memory>(dst_desc, engine_);

    auto reorder = mkldnn::reorder(src_mem, *dst_mem);
    mkldnn::stream astream(engine_);
    reorder.execute(astream, src_mem, *dst_mem);
    astream.wait();

    return dst_mem;
  }

  // Convert data from one data format to another and rescale it.
  // If the desired data type is (un)signed int8, quantization occurs here.
  std::shared_ptr<mkldnn::memory> ReorderWithScale(
      const std::shared_ptr<memory> src_mem, const memory::desc& dst_md,
      const std::vector<float>& scale_data) {
    auto dst_mem = std::make_shared<mkldnn::memory>(dst_md, engine_);
    mkldnn::primitive_attr attributes;
    // According to MKL-DNN's documentation, the mask determines along which
    // dimensions the scale should be applied:
    // 0 - a single scale is applied to the whole tensor
    // 1 - the scale is applied along dimension 0 of the tensor.
    //     For weights quantization that dimension is the output channel,
    //     because we perform per-output-channel quantization.
    int mask = CreateMask(0, scale_data.size() > 1);
    attributes.set_output_scales(mask, scale_data);
    auto reorder = mkldnn::reorder(*src_mem, *dst_mem, attributes);

    mkldnn::stream astream(engine_);
    reorder.execute(astream,
                    {{MKLDNN_ARG_FROM, *src_mem}, {MKLDNN_ARG_TO, *dst_mem}});
    astream.wait();

    return dst_mem;
  }

  template <typename T>
  static mkldnn::memory::desc CreateMemDescriptor(
      const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
    return platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(),
                                   format);
  }

  template <typename T>
  static mkldnn::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                  MKLDNNMemoryFormat format) {
    auto dims = framework::vectorize(tensor->dims());
    return CreateMemDescriptor<T>(dims, format);
  }

  template <typename T>
  mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc,
                              const Tensor* tensor) {
    return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
  }

  mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc, void* data) {
    return memory(desc, engine_, data);
  }

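  // Like CreateMemory, but returns a shared_ptr so that the memory object
  // can be stored in the device context's blob cache and outlive this
  // factory.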
  template <typename T>
  std::shared_ptr<mkldnn::memory> CreateMemoryToBeCached(
      const mkldnn::memory::desc& desc, const Tensor* tensor) {
    return CreateMemoryToBeCached(desc,
                                  platform::to_void_cast<T>(tensor->data<T>()));
  }

  std::shared_ptr<mkldnn::memory> CreateMemoryToBeCached(
      const mkldnn::memory::desc& desc, void* data) {
    return std::make_shared<memory>(desc, engine_, data);
  }

  // Create weights memory and transform to default MKL-DNN format
  std::shared_ptr<mkldnn::memory> CreateWeightsMemory(const Tensor* weights) {
    auto dims = framework::vectorize(weights->dims());
    std::swap(dims[0], dims[1]);  // Swap to the (out, in) order used by DNNL
    auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
    // Transpose weights through MKL-DNN's reorder from io to oi format.
    return Reorder(src_desc, dst_desc,
                   platform::to_void_cast<float>(weights->data<float>()));
  }

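  // Keep the reordered (and possibly quantized) weights and bias alive in
  // the device context cache, keyed by thread id and input names, so they
  // can be reused without repeating the reorder and quantization.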
  void CacheWeightsAndBias(const MKLDNNDeviceContext& dev_ctx,
                           const ExecutionContext& ctx) {
    const std::string key = platform::CreateKey(platform::ThreadIDasStr());
    const std::string weights_key = key + ctx.InputName("W");
    const std::string bias_key = key + ctx.InputName("Bias");
    dev_ctx.SetBlob(weights_key, weights_);
    dev_ctx.SetBlob(bias_key, bias_);
  }

  // Compute the bias scales so that their values correspond to the scale
  // of the data produced by the input and weights multiplication.
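  // That is: bias_scale[i] = scale_in * scale_weights[i], matching the
  // scale of the int32 accumulator the bias is added to.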
  std::vector<float> ComputeBiasScales(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> bias_scales(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in_data * scale_weights_data[i];
    }

    return bias_scales;
  }

  // Correct the output scale to take the scaling of input and weights into
  // account. Since the data coming out of the input and weights
  // multiplication carries their combined scales, it needs to be divided by
  // those scales to normalise it back to its floating-point range, and then
  // multiplied by the desired output scale.
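  // That is: output_shift_scale[i] = scale_out / (scale_in * scale_weights[i]).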
  std::vector<float> ComputeOutputShiftScale(const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    // If the output will be in floats, we don't multiply by scale_out.
    auto scale_out_data = ctx.Attr<bool>("force_fp32_output")
                              ? 1.0f
                              : ctx.Attr<float>("Scale_out");
    const size_t weight_scales_num = scale_weights_data.size();
    std::vector<float> output_shift_scale(weight_scales_num);

#pragma omp parallel for
    for (size_t i = 0; i < weight_scales_num; i++) {
      if (scale_weights_data[i] == 0.0)
        output_shift_scale[i] = scale_out_data;
      else
        output_shift_scale[i] =
            scale_out_data / (scale_in_data * scale_weights_data[i]);
    }

    return output_shift_scale;
  }

  // Compute MKL-DNN's scaling mask, which determines along which dimension
  // slice the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }
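  // Example: per-output-channel weight scales vary along dimension 0 of
  // the (out, in)-ordered weights, so CreateMask(0, true) == 1, while a
  // single tensor-wide scale yields a mask of 0.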

  void QuantizeWeights(const ExecutionContext& ctx, memory::desc dst) {
    weights_ = ReorderWithScale(weights_, dst,
                                ctx.Attr<std::vector<float>>("Scale_weights"));
  }

  void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
                    const ExecutionContext& ctx) {
    auto bias_scales = ComputeBiasScales(ctx);
    bias_ = ReorderWithScale(bias_, fc_prim_desc.bias_desc(), bias_scales);
  }

  // Fuse ReLU into FC when the activation_type attribute is set to 'relu'
  mkldnn::primitive_attr CreatePostOps(const ExecutionContext& ctx) {
    mkldnn::primitive_attr attributes;
    mkldnn::post_ops post_operations;

    auto output_shift_scale = ComputeOutputShiftScale(ctx);
    int mask = CreateMask(1, output_shift_scale.size() > 1);
    attributes.set_output_scales(mask, output_shift_scale);

    if (ctx.Attr<std::string>("activation_type") == "relu") {
      constexpr float scale = 1.0f;
      constexpr float negative_slope = 0.0f;
      constexpr float placeholder = 1.0f;  // beta
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     negative_slope, placeholder);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  mkldnn::inner_product_forward::primitive_desc CreateFcPrimDesc(
      const mkldnn::memory::desc& input_desc,
      const mkldnn::memory::desc& weights_desc,
      const mkldnn::memory::desc& bias_desc,
      const mkldnn::memory::desc& dst_desc,
      const mkldnn::primitive_attr& attrs) {
    auto fc_desc =
        inner_product_forward::desc(prop_kind::forward_scoring, input_desc,
                                    weights_desc, bias_desc, dst_desc);

    return inner_product_forward::primitive_desc(fc_desc, attrs, engine_);
  }

  // Create output memory based on output tensor and inner_product
  // primitive descriptor format chosen for output
  mkldnn::memory CreateDstMemory(
      const mkldnn::inner_product_forward::primitive_desc& fc_prim_desc,
      const ExecutionContext& ctx, Tensor* output) {
    auto dst_desc = fc_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();
    T_out* output_data =
        output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
    memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
    SetOutputFormat(ctx.Input<LoDTensor>("Input")->format(), output);

    return dst_mem;
  }

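  // Resize the output tensor to the shape implied by the runtime input and
  // weight dims (via FCOutputSize) and propagate the input's LoD.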
  void RecomputeOutputDims(const ExecutionContext& ctx, const LoDTensor* input,
                           const Tensor* w, LoDTensor* output) {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights, false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(input->dims(), w->dims(), output_dims, in_num_col_dims,
                 padding_weights);
    output->Resize(framework::make_ddim(output_dims));
    output->set_lod(input->lod());
  }

 private:
  const mkldnn::engine& engine_;
  boost::optional<memory> input_;
  boost::optional<memory> output_;
  std::shared_ptr<memory> bias_;
  std::shared_ptr<memory> weights_;
  boost::optional<inner_product_forward> fc_;
};

// Attempt to fetch cached primitive factory based on provided parameters
// of input format, weight dimensions and output name.
// If not cached, create a new one.
template <typename T_in, typename T_w, typename T_out>
static std::shared_ptr<FCPrimitiveFactory<T_in, T_w, T_out>>
GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
                    const std::string& key) {
  auto prim_creator =
      std::static_pointer_cast<FCPrimitiveFactory<T_in, T_w, T_out>>(
          dev_ctx.GetBlob(key));
  if (prim_creator == nullptr) {
    prim_creator = std::make_shared<FCPrimitiveFactory<T_in, T_w, T_out>>(
        dev_ctx.GetEngine());
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

// Choose appropriate primitive factory implementation based on inferred
// output type (uint8, int8 or float).
template <typename T_in, typename T_w>
static void ExecuteFc(const ExecutionContext& ctx, const LoDTensor* input,
                      const Tensor* w, const Tensor* bias, LoDTensor* output,
                      bool fuse_relu, bool force_fp32_output) {
  auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
  const std::string prim_key = platform::CreateKey(
      platform::ThreadIDasStr(), input->format(), input->dims()[0],
      framework::vectorize<int>(w->dims()), ctx.OutputName("Out"));
  constexpr bool is_int8 =
      std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
  if (!is_int8 || force_fp32_output) {
    GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else if (fuse_relu) {
    GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  } else {
    GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, prim_key)
        ->ExecuteFcPrimitive(input, w, bias, output, dev_ctx, ctx);
  }
}

template <typename T_in, typename T_w>
class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("FC MKL-DNN must use CPUPlace."));
    auto input = ctx.Input<LoDTensor>("Input");
    auto w = ctx.Input<Tensor>("W");
    auto bias = ctx.Input<Tensor>("Bias");
    auto output = ctx.Output<LoDTensor>("Out");

    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    ExecuteFc<T_in, T_w>(ctx, input, w, bias, output, fuse_relu,
                         force_fp32_output);

    output->set_layout(DataLayout::kMKLDNN);
  }
};
}  // namespace operators
}  // namespace paddle

// FC weights are stored as fp32 by default; the template argument for the
// weight data type determines their destination data type, i.e. the type
// eventually used during the kernel's computations.
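// The registered input type (FP32/U8/S8) selects the kernel; the output
// type is chosen at run time in ExecuteFc: float when force_fp32_output is
// set (or the input is not int8), uint8 when ReLU is fused, int8 otherwise.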
namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    FP32, ops::kFCMKLDNNFP32,
                                    ops::FCMKLDNNOpKernel<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    U8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<uint8_t, int8_t>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(fc, MKLDNN, ::paddle::platform::CPUPlace,
                                    S8, ops::kFCMKLDNNINT8,
                                    ops::FCMKLDNNOpKernel<int8_t, int8_t>);