/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using dnnl::inner_product_forward;
using dnnl::memory;
using dnnl::primitive;
using dnnl::prop_kind;
using dnnl::stream;
using framework::DDim;
using framework::ExecutionContext;
using LoDTensor = phi::DenseTensor;
using phi::funcs::OneDNNGetDataType;
using phi::funcs::to_void_cast;
using platform::MKLDNNDeviceContext;

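// Cached oneDNN primitive and memory objects of a fully connected
// (inner product) forward pass; stored as a blob in the device context so
// repeated executions of the same FC op can reuse them.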
struct InnerProductCache {
  dnnl::inner_product_forward inner_product_p;
  dnnl::memory src_mem;
  dnnl::memory weights_mem;
  dnnl::memory bias_mem;
  dnnl::memory dst_mem;
};
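
// Handler that builds the inner_product_forward primitive descriptor for the
// FC op: the input is flattened to a 2-D {MB, IC} matrix and the
// src/weights/dst memory descriptors use format_tag::any so oneDNN can choose
// its preferred layouts; fused post-ops are attached via CreateFCAttrs().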
template <typename T_in, typename T_w, typename T_out>
class FCMKLDNNHandler
    : public phi::funcs::OneDNNHandlerNoCachingT<T_in,
                                                 dnnl::inner_product_forward> {
 public:
  FCMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                  const platform::MKLDNNDeviceContext& dev_ctx,
                  const phi::DenseTensor* x,
                  const phi::DenseTensor* weights,
                  const phi::DenseTensor* bias,
                  phi::DenseTensor* out,
                  const int in_num_col_dims,
                  dnnl::engine mkldnn_engine,
                  platform::Place cpu_place)
      : phi::funcs::OneDNNHandlerNoCachingT<T_in, dnnl::inner_product_forward>(
            mkldnn_engine, cpu_place),
        dev_ctx_(dev_ctx) {
    this->memory_key_ = ctx.InputName("W");

    auto x_vec_dims = phi::vectorize(x->dims());
    auto weights_vec_dims = phi::vectorize(weights->dims());

    int MB = 1;
    for (int i = 0; i < in_num_col_dims; ++i) {
      MB *= x_vec_dims[i];
    }

    int IC = 1;
    for (size_t i = in_num_col_dims; i < x_vec_dims.size(); ++i) {
      IC *= x_vec_dims[i];
    }

    int OC = weights_vec_dims[1];

    dnnl::memory::desc bias_md;

    auto src_md = dnnl::memory::desc(
        {MB, IC}, OneDNNGetDataType<T_in>(), dnnl::memory::format_tag::any);
    auto weights_md = dnnl::memory::desc(
        {OC, IC}, OneDNNGetDataType<T_w>(), dnnl::memory::format_tag::any);
    auto dst_md = dnnl::memory::desc(
        {MB, OC}, OneDNNGetDataType<T_out>(), dnnl::memory::format_tag::any);
    if (bias) {
      bias_md = dnnl::memory::desc({bias->numel()},
                                   OneDNNGetDataType<float>(),
                                   dnnl::memory::format_tag::a);
    }

    const auto attrs = CreateFCAttrs(ctx);

    this->AcquireForwardPrimitiveDescriptor(attrs,
                                            prop_kind::forward_inference,
                                            src_md,
                                            weights_md,
                                            bias_md,
                                            dst_md);
  }

 private:
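  // Build the primitive attributes: output scales for int8 quantization,
  // an accumulating sum post-op for a fused residual connection, fused
  // activations (e.g. ReLU coming from fc_fuse_pass) and an optional linear
  // post-op for "fused_output_scale".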
  dnnl::primitive_attr CreateFCAttrs(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;

    std::vector<float> output_shift_scale;
    float scale = 1.0f;
    if (phi::funcs::is_int8<T_w>()) {
      std::tie(output_shift_scale, scale) = ComputeOutputShiftScale(ctx);
      int mask = CreateMask(1, output_shift_scale.size() > 1);
      attributes.set_output_scales(mask, output_shift_scale);
    }

    float sum_scale = 1.0f;
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      post_operations.append_sum(sum_scale);
    }

    // ReLU from "fc_fuse_pass"
    if (ctx.Attr<std::string>("activation_type") == "relu") {
      post_operations.append_eltwise(
          scale, dnnl::algorithm::eltwise_relu, 0.0f, 0.0f);
    }
    platform::AppendActivation(ctx, post_operations, scale);

    if (ctx.HasAttr("fused_output_scale")) {
      float scale_alpha = ctx.Attr<float>("fused_output_scale");
      post_operations.append_eltwise(
          1.0, dnnl::algorithm::eltwise_linear, scale_alpha, 0.0f);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  // Compute the bias scales so that their values correspond to the
  // scale of the data being an output of weights and input multiplication.
  std::vector<float> ComputeBiasScales(
      const float scale_in, const std::vector<float>& scale_weights) {
    std::vector<float> bias_scales(scale_weights.size());

    for (size_t i = 0; i < bias_scales.size(); ++i) {
      if (scale_weights[i] == 0.0)
        bias_scales[i] = 1.0f;
      else
        bias_scales[i] = scale_in * scale_weights[i];
    }

    return bias_scales;
  }

  // Correct the output scale to take into account the scaling of input and
  // weights. Since the data that comes out of the input and weight
  // multiplication is scaled with its own scales, it needs to be divided by
  // those scales to normalise it back to its floating-point range, and then
  // multiplied by the desired output scale.
  std::tuple<std::vector<float>, float> ComputeOutputShiftScale(
      const ExecutionContext& ctx) {
    auto scale_in_data = ctx.Attr<float>("Scale_in");
    auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
    bool has_activation = !ctx.Attr<std::string>("activation_type").empty();
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

    // If the output will be in floats, we don't multiply by scale_out.

    float scale = (!force_fp32_output && has_activation)
                      ? ctx.Attr<float>("Scale_out")
                      : 1.0f;
    float inner_scale = (force_fp32_output || has_activation)
                            ? 1.0f
                            : ctx.Attr<float>("Scale_out");
    const size_t weight_scales_num = scale_weights_data.size();

    for (size_t i = 0; i < weight_scales_num; ++i) {
      if (scale_weights_data[i] == 0.0)
        scale_weights_data[i] = inner_scale;
      else
        scale_weights_data[i] =
            inner_scale / (scale_in_data * scale_weights_data[i]);
    }

    return make_tuple(scale_weights_data, scale);
  }

  // Computing MKL-DNN's scaling mask which determines along which dimension
  // slice the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantizied) {
    return is_multi_channel_quantizied ? 1 << slice_dimension : 0;
  }

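  // Create the target memory and reorder the user memory into it, applying
  // the given attributes (e.g. quantization scales) during the reorder.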
  std::shared_ptr<dnnl::memory> AcquireMemoryWithReorderAndAttrs(
      const dnnl::memory::desc& user_md,
      const dnnl::memory::desc& target_md,
      void* ptr,
      const dnnl::primitive_attr& attrs) {
    std::shared_ptr<dnnl::memory> target_memory_p;

    auto user_memory_p =
        std::make_shared<dnnl::memory>(user_md, this->engine_, ptr);
    target_memory_p = std::make_shared<dnnl::memory>(target_md, this->engine_);
    auto reorder_p = std::make_shared<dnnl::reorder>(
        *user_memory_p, *target_memory_p, attrs);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          1,
          platform::EventRole::kUniqueOp);
      reorder_p->execute(
          astream,
          {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::string memory_key_;
  const platform::MKLDNNDeviceContext& dev_ctx_;

 public:
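  // Reorder the input tensor into the layout expected by the primitive's
  // src_desc(), flattening 3-D/4-D inputs to the primitive's 2-D shape.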
  std::shared_ptr<dnnl::memory> AcquireSrcMemoryWithReorder(
      const phi::DenseTensor* x) {
    const T_in* x_data = x->data<T_in>();

    auto user_md = x->mem_desc();
    if (x->dims().size() != 2) {
      // Reshape restrictions are always satisfied because, for 3- or 4-dim
      // inputs, a plain layout is enforced.
      user_md = user_md.reshape(this->fwd_pd_->src_desc().dims());
    }

    return this->AcquireMemoryWithReorder(
        user_md, this->fwd_pd_->src_desc(), to_void_cast<T_in>(x_data));
  }

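  // For BF16/FP32 the bias is used as-is; for int8 it is reordered with
  // scales derived from the input and weight scales, and the reordered
  // memory is cached in the device context under the weights' memory key.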
  std::shared_ptr<dnnl::memory> AcquireBiasMemoryWithReorder(
      const phi::DenseTensor* bias,
      const float scale_in,
      const std::vector<float>& scale_weights) {
    const float* bias_data = bias->data<float>();

    if (phi::funcs::is_int8<T_w>() == false) {
      // for BF16/FP32 bias is 1D and has no scales, so reorder is not needed
      return this->AcquireMemoryFromPrimitive(this->fwd_pd_->bias_desc(),
                                              to_void_cast<float>(bias_data));
    } else {
      const std::string bias_key = this->memory_key_ + "@bias";
      auto memory_p = std::static_pointer_cast<dnnl::memory>(
          this->dev_ctx_.GetBlob(bias_key));

      if (!memory_p) {
        const auto& scale_data = ComputeBiasScales(scale_in, scale_weights);
        dnnl::primitive_attr attrs;

        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        auto user_md = dnnl::memory::desc({bias->dims()[0]},
                                          OneDNNGetDataType<float>(),
                                          dnnl::memory::format_tag::a);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->bias_desc(),
            to_void_cast<float>(bias_data),
            attrs);
        this->dev_ctx_.SetBlob(bias_key, memory_p);
      }
      return memory_p;
    }
  }

  std::shared_ptr<dnnl::memory> AcquireWeightsMemoryWithReorder(
      const phi::DenseTensor* weights, const std::vector<float>& scale_data) {
    const std::string weights_key = this->memory_key_ + "@weights";
    auto memory_p = std::static_pointer_cast<dnnl::memory>(
        this->dev_ctx_.GetBlob(weights_key));

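    // If not cached yet, reorder the fp32 weights (stored in io format) into
    // the primitive's preferred weights format, applying the quantization
    // scales during the reorder when running int8, and cache the result.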
    if (!memory_p) {
      const float* weights_data = weights->data<float>();
      auto weights_dims = this->fwd_pd_->weights_desc().dims();

      auto user_md = dnnl::memory::desc(weights_dims,
                                        OneDNNGetDataType<float>(),
                                        dnnl::memory::format_tag::io);

      if (phi::funcs::is_int8<T_w>()) {
        dnnl::primitive_attr attrs;
        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->weights_desc(),
            to_void_cast<float>(weights_data),
            attrs);
      } else {
        memory_p =
            this->AcquireMemoryWithReorder(user_md,
                                           this->fwd_pd_->weights_desc(),
                                           to_void_cast<float>(weights_data));
      }

      this->dev_ctx_.SetBlob(weights_key, memory_p);
    }
    return memory_p;
  }

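  // With a fused residual connection the output shares the ResidualData
  // buffer so that the sum post-op accumulates into it; otherwise a regular
  // dst memory is acquired.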
  std::shared_ptr<dnnl::memory> AcquireCustomDstMemory(
      const ExecutionContext& ctx, phi::DenseTensor* out) {
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      auto* residual_param = ctx.Output<phi::DenseTensor>("ResidualData");

      PADDLE_ENFORCE_EQ(
          out->dims(),
          residual_param->dims(),
          platform::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, but got output's dimension = %d"
              " and residual param's dimension = %d.",
              out->dims().size(),
              residual_param->dims().size()));

      out->ShareDataWith(*residual_param);
    }
    return this->template AcquireDstMemory<T_out>(out);
  }
};  // class FCMKLDNNHandler

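// Select the weight data type for the kernel: for quantized (uint8) inputs
// the weights are int8, otherwise they share the input type.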
#define IF_CHANGE_FC_TW_TYPENAME(condition, ...) \
  if (condition) {                               \
    using T_w = int8_t;                          \
    __VA_ARGS__();                               \
  } else {                                       \
    using T_w = T_in;                            \
    __VA_ARGS__();                               \
  }

template <typename T_in>
class FCMKLDNNKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";

    IF_CHANGE_FC_TW_TYPENAME((std::is_same<T_in, uint8_t>::value), ([&] {
                               if (force_fp32_output) {
                                 this->RunKernel<float, T_w>(ctx);
                               } else if (phi::funcs::is_int8<T_in>()) {
                                 if (fuse_relu) {
                                   this->RunKernel<uint8_t, T_w>(ctx);
                                 } else {
                                   this->RunKernel<int8_t, T_w>(ctx);
                                 }
                               } else {
                                 this->RunKernel<T_in, T_w>(ctx);
                               }
                             }));
  }

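  // Point the cached src memory at the current input's data, reordering it
  // first if the input's layout differs from the cached primitive's expected
  // src layout.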
  void PrepareSrcMem(const std::shared_ptr<inner_product_forward>& fc_p,
                     const std::shared_ptr<dnnl::memory>& src_mem,
                     const LoDTensor* x,
                     const dnnl::engine& engine) const {
    auto x_md = x->mem_desc().reshape(src_mem->get_desc().dims());
    if (x_md != src_mem->get_desc()) {
      dnnl::memory x_mem(x_md, engine, to_void_cast<T_in>(x->data<T_in>()));
      auto reorder_p = dnnl::reorder(x_mem, *src_mem);

      auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();
      reorder_p.execute(astream, x_mem, *src_mem);
      astream.wait();
    } else {
      src_mem->set_data_handle(to_void_cast<T_in>(x->data<T_in>()));
    }
  }

  template <typename T_out, typename T_w>
  void RunKernel(const framework::ExecutionContext& ctx) const {
    const auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    const auto* x = ctx.Input<LoDTensor>("Input");
    const auto* weights = ctx.Input<phi::DenseTensor>("W");
    const auto* bias = ctx.Input<phi::DenseTensor>("Bias");
    auto out = ctx.Output<LoDTensor>("Out");

    const float scale_in = ctx.Attr<float>("Scale_in");
    const auto& scale_weights = ctx.Attr<std::vector<float>>("Scale_weights");

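    // The primitive and its memory objects are cached in the device context,
    // keyed by the input/weight names and the input dims, so repeated
    // executions of the same FC op skip handler construction.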
    std::shared_ptr<dnnl::inner_product_forward> fc_p;
    std::shared_ptr<dnnl::memory> src_memory_p;
    std::shared_ptr<dnnl::memory> weights_memory_p;
    std::shared_ptr<dnnl::memory> bias_memory_p;
    std::shared_ptr<dnnl::memory> dst_memory_p;

    std::string cache_key;
    cache_key.reserve(64);
    cache_key = platform::ExtendKeyWithThreadInfoIfNeeded(
        dev_ctx,
        platform::CreateKey(dev_ctx,
                            ctx.InputName("Input"),
                            ctx.InputName("W"),
                            phi::vectorize(x->dims())));

    auto inner_product_cache =
        std::static_pointer_cast<InnerProductCache>(dev_ctx.GetBlob(cache_key));

    RecomputeOutputDims(ctx, x, weights, out);

    if (inner_product_cache) {
      fc_p = std::make_shared<dnnl::inner_product_forward>(
          inner_product_cache->inner_product_p);
      src_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->src_mem);
      PrepareSrcMem(fc_p, src_memory_p, x, mkldnn_engine);

      weights_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->weights_mem);

      dst_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->dst_mem);
      if (ctx.HasAttr("fuse_residual_connection") &&
          ctx.Attr<bool>("fuse_residual_connection")) {
        auto* residual_param = ctx.Output<phi::DenseTensor>("ResidualData");
        out->ShareDataWith(*residual_param);
      }
      auto out_ptr = out->mutable_data<T_out>(
          ctx.GetPlace(), dst_memory_p->get_desc().get_size());
      dst_memory_p->set_data_handle(out_ptr);

      if (bias) {
        bias_memory_p =
            std::make_shared<dnnl::memory>(inner_product_cache->bias_mem);
      }
    } else {
      auto in_col_dims = ctx.Attr<int>("in_num_col_dims");

      FCMKLDNNHandler<T_in, T_w, T_out> handler(ctx,
                                                dev_ctx,
                                                x,
                                                weights,
                                                bias,
                                                out,
                                                in_col_dims,
                                                mkldnn_engine,
                                                ctx.GetPlace());

      src_memory_p = handler.AcquireSrcMemoryWithReorder(x);
      weights_memory_p =
          handler.AcquireWeightsMemoryWithReorder(weights, scale_weights);
      dst_memory_p = handler.AcquireCustomDstMemory(ctx, out);

      if (bias) {
        bias_memory_p =
            handler.AcquireBiasMemoryWithReorder(bias, scale_in, scale_weights);
      }

      fc_p = handler.AcquireForwardPrimitive();
    }

    auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();

    std::unordered_map<int, dnnl::memory> fc_args = {
        {DNNL_ARG_SRC, *src_memory_p},
        {DNNL_ARG_WEIGHTS, *weights_memory_p},
        {DNNL_ARG_DST, *dst_memory_p}};

    if (bias) {
      fc_args.insert({DNNL_ARG_BIAS, *bias_memory_p});
    }

    fc_p->execute(astream, fc_args);
    astream.wait();

    if (!inner_product_cache) {
      auto ip_cache = std::make_shared<InnerProductCache>();
      ip_cache->inner_product_p = *fc_p;
      ip_cache->src_mem = *src_memory_p;
      ip_cache->weights_mem = *weights_memory_p;
      ip_cache->dst_mem = *dst_memory_p;
      if (bias) {
        ip_cache->bias_mem = *bias_memory_p;
      }
      dev_ctx.SetBlob(cache_key, ip_cache);
    }

    platform::SetOutMemDescWithLogicalLayoutFusesSupport(
        ctx,
        out,
        dst_memory_p->get_desc().reshape(phi::vectorize(out->dims())));
  }

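  // Recompute the output dims from the input and weight dims (weight padding
  // is not supported by the oneDNN FC) and propagate the input's LoD.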
  void RecomputeOutputDims(const ExecutionContext& ctx,
                           const LoDTensor* x,
                           const phi::DenseTensor* weights,
                           LoDTensor* out) const {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights,
                      false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(x->dims(),
                 weights->dims(),
                 output_dims,
                 in_num_col_dims,
                 padding_weights);
    out->Resize(phi::make_ddim(output_dims));
    out->set_lod(x->lod());
  }
};

}  // namespace operators
}  // namespace paddle

// Weights of FC are by default stored using fp32; the template argument for
// the weight data type determines their destination data type, i.e. what will
// eventually be used during the kernel's computations.
namespace ops = paddle::operators;

REGISTER_OP_KERNEL(fc,
                   MKLDNN,
                   ::paddle::platform::CPUPlace,
                   ops::FCMKLDNNKernel<float>,
                   ops::FCMKLDNNKernel<paddle::platform::bfloat16>,
                   ops::FCMKLDNNKernel<uint8_t>,
                   ops::FCMKLDNNKernel<int8_t>);