/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>

#include "paddle/fluid/operators/mul_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"

namespace paddle {
namespace framework {
class Tensor;
}  // namespace framework
namespace platform {
class MKLDNNDeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace operators {

using framework::DataLayout;
using framework::DDim;
using framework::ExecutionContext;
using framework::Tensor;
using dnnl::inner_product_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
using platform::MKLDNNDeviceContext;
using platform::to_void_cast;
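
// Builds, caches, and executes a oneDNN inner_product primitive implementing
// the mul op. XT/YT are the X/Y input element types and OT the output element
// type; on the int8 path Y (the weights) is quantized with scale_y and the
// result is rescaled through oneDNN output scales.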

template <typename XT, typename YT, typename OT>
class MulPrimitiveFactory {
 public:
  explicit MulPrimitiveFactory(const dnnl::engine &engine) : engine_(engine) {}

  inner_product_forward CreateMulPrimitive(const Tensor *x_input,
                                           const Tensor *y_input,
                                           Tensor *output,
                                           const ExecutionContext &ctx) {
    /* check data format and reorder if needed */
    int x_num_col_dims = ctx.Attr<int>("x_num_col_dims");
    int y_num_col_dims = ctx.Attr<int>("y_num_col_dims");

    // TODO(intel-minghui): Remove the restriction that only Input(Y) is
    // supported as the weights
    PADDLE_ENFORCE_EQ(
        (std::is_same<YT, float>::value), true,
        platform::errors::InvalidArgument(
            "Input(Y) must be fp32 data type since only fp32 data type is "
            "supported in the current design of MKLDNN INT8."));

    auto x_matrix = UpdateDataFormat<XT>(x_input, x_num_col_dims, ctx);
    auto y_matrix = UpdateDataFormat<YT>(y_input, y_num_col_dims, ctx);

    auto output_dim = output->dims();
    if (output_dim.size() != 2) {
      output->Resize({x_matrix.dims()[0], y_matrix.dims()[1]});
    }

    if (mul_) {
      UpdateDataPointers(ctx, output, &x_matrix);
      Execute();
      return *(mul_);
    }

    auto src_desc = CreateMemDescriptor<XT>(&x_matrix, MKLDNNMemoryFormat::nc);
    x_input_ = CreateMemory<XT>(src_desc, &x_matrix);

    if (is_int8_) {
      const auto trans_y = TransposeInputY(&y_matrix);
      auto scale_y = ctx.Attr<std::vector<float>>("scale_y");
      y_input_ = QuantInputY(trans_y, scale_y);
    } else {
      y_input_ = TransposeInputY(&y_matrix);
    }

    auto dst_desc = CreateMemDescriptor<OT>(output, MKLDNNMemoryFormat::any);

    mul_ = CreateMulPrimitive(*x_input_, *y_input_, dst_desc, output, ctx);
    Execute();
    return *(mul_);
  }

 private:
  memory ReorderWithScale(const memory::desc &src_desc,
                          const memory::desc &dst_desc, void *src_data,
                          const std::vector<float> &scale) {
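    // Reorder src_desc -> dst_desc while applying output scales; a mask of 1
    // selects per-output-channel scales, 0 a single common scale. Used to
    // quantize the fp32 weights to int8.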
    auto mask = scale.size() > 1 ? 1 : 0;
    dnnl::primitive_attr attr;
    attr.set_output_scales(mask, scale);

    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = memory(dst_desc, engine_);

    auto reorder_pd = dnnl::reorder::primitive_desc(src_mem, dst_mem, attr);

    auto reorder = dnnl::reorder(reorder_pd);

    auto &astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream, src_mem, dst_mem);
      astream.wait();
    }

    return dst_mem;
  }

  memory QuantInputY(memory input_y, const std::vector<float> &scale_y) {
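    // Quantize the already transposed fp32 Y (oi layout) to int8 using
    // scale_y, keeping the same dimensions.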
    const auto &dims = input_y.get_desc().data.dims;
    auto ndims = input_y.get_desc().data.ndims;
    auto y_dims = std::vector<int64_t>(dims, dims + ndims);

    auto user_y_desc = CreateMemDescriptor<YT>(y_dims, MKLDNNMemoryFormat::oi);
    auto y_desc = CreateMemDescriptor<int8_t>(y_dims, MKLDNNMemoryFormat::oi);

    return ReorderWithScale(user_y_desc, y_desc, input_y.get_data_handle(),
                            scale_y);
  }

  dnnl::primitive_attr CreateMulAttr(const ExecutionContext &ctx,
                                     bool force_fp32_output) {
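    // Compose the primitive attribute for the int8 path: the accumulated
    // result is rescaled by scale_out / (scale_x * scale_y[i]) per output
    // channel (or by a single scale when scale_y has one element); with
    // force_fp32_output the requested output scale is fixed to 1.0f.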
    dnnl::primitive_attr mul_attr;

    auto scale_y_data = ctx.Attr<std::vector<float>>("scale_y");
    auto scale_x_data = ctx.Attr<float>("scale_x");
    auto scale_out_data =
        force_fp32_output ? 1.0f : ctx.Attr<float>("scale_out");

    bool is_multi_channel = scale_y_data.size() > 1;
    int count = is_multi_channel ? scale_y_data.size() : 1;
    std::vector<float> output_shift_scale(count);
    for (int i = 0; i < count; i++) {
      if (scale_y_data[i] == 0.0)
        output_shift_scale[i] = scale_out_data;
      else
        output_shift_scale[i] =
            scale_out_data / (scale_x_data * scale_y_data[i]);
    }
    int mul_mask = is_multi_channel ? 1 : 0;
    mul_attr.set_output_scales(mul_mask, output_shift_scale);

    return mul_attr;
  }

  inner_product_forward CreateMulPrimitive(const memory &x_memory,
                                           const memory &y_memory,
                                           const memory::desc &dst_desc,
                                           Tensor *output,
                                           const ExecutionContext &ctx) {
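    // Create the inner_product primitive descriptor from the src/weights/dst
    // descriptors, attaching the output-scale attribute in the int8 case,
    // then allocate the destination memory for the output tensor.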
    const auto x_desc = x_memory.get_desc();
    const auto y_desc = y_memory.get_desc();
    inner_product_forward::primitive_desc mul_prim_desc;

    const auto &mul_desc = inner_product_forward::desc(
        prop_kind::forward, x_desc, y_desc, dst_desc);

    if (is_int8_) {
      bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
      auto mul_attr = CreateMulAttr(ctx, force_fp32_output);
      mul_prim_desc =
          inner_product_forward::primitive_desc(mul_desc, mul_attr, engine_);
    } else {
      mul_prim_desc = inner_product_forward::primitive_desc(mul_desc, engine_);
    }

    output_ = CreateDstMemory(mul_prim_desc, ctx, output);

    return inner_product_forward(mul_prim_desc);
  }

  void Execute() {
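    // Run the cached primitive on the thread-local oneDNN stream using the
    // current src/weights/dst memory handles.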
    auto &astream = platform::MKLDNNDeviceContext::tls().get_stream();
    (*mul_).execute(astream, {{DNNL_ARG_SRC, *x_input_},
                              {DNNL_ARG_WEIGHTS, *y_input_},
                              {DNNL_ARG_DST, *output_}});
    astream.wait();
  }

  template <typename T>
  Tensor UpdateDataFormat(const Tensor *data, int num_col_dims,
                          const ExecutionContext &ctx) {
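    // If a 4-D/5-D input is not already in the plain nchw/ncdhw layout,
    // reorder it into that layout first, then flatten it to a 2-D matrix
    // according to num_col_dims.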
    Tensor x_tmp;
    Tensor data_matrix;
    MKLDNNMemoryFormat src_fmt = data->format();
    MKLDNNMemoryFormat dst_fmt;
    auto src_mdesc = CreateMemDescriptor<T>(data, src_fmt);

    if ((data->dims().size() == 4 &&
         src_fmt != (dst_fmt = MKLDNNMemoryFormat::nchw)) ||
        (data->dims().size() == 5 &&
         src_fmt != (dst_fmt = MKLDNNMemoryFormat::ncdhw))) {
      auto dst_mdesc = CreateMemDescriptor<T>(data, dst_fmt);
      x_tmp.mutable_data<T>(ctx.GetPlace(), data->memory_size());

      Reorder(src_mdesc, dst_mdesc, to_void_cast<T>(data->data<T>()),
              to_void_cast<T>(x_tmp.data<T>()));

      x_tmp.Resize(data->dims());
      x_tmp.set_format(platform::GetMKLDNNFormat(dst_mdesc));
      data_matrix = framework::ReshapeToMatrix(x_tmp, num_col_dims);
    } else {
      data_matrix = framework::ReshapeToMatrix(*data, num_col_dims);
    }

    return data_matrix;
  }

  void UpdateDataPointers(const ExecutionContext &ctx, Tensor *out,
                          const Tensor *in) {
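    // The primitive is being reused: only the data handles of the cached
    // memories need to point at the new input/output buffers.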
    x_input_->set_data_handle(to_void_cast<XT>(in->data<XT>()));
    output_->set_data_handle(out->mutable_data<OT>(ctx.GetPlace()));

    if (out->format() == MKLDNNMemoryFormat::undef) {
      auto output_format = platform::GetMKLDNNFormat(*output_);
      out->set_format((MKLDNNMemoryFormat)output_format);
    }
  }

  template <typename T>
  memory::desc CreateMemDescriptor(
      const Tensor *tensor, MKLDNNMemoryFormat format,
      memory::data_type type = platform::MKLDNNGetDataType<T>()) {
    auto dims = framework::vectorize<int64_t>(tensor->dims());
    return platform::MKLDNNMemDesc(dims, type, format);
  }

  template <typename T>
  memory::desc CreateMemDescriptor(
      const std::vector<int64_t> &dims, MKLDNNMemoryFormat format,
      memory::data_type type = platform::MKLDNNGetDataType<T>()) {
    return platform::MKLDNNMemDesc(dims, type, format);
  }

  template <typename T>
  memory CreateMemory(const memory::desc &desc, const Tensor *tensor) {
    return memory(desc, engine_, to_void_cast<T>(tensor->data<T>()));
  }

  memory CreateDstMemory(
      const inner_product_forward::primitive_desc &mul_prim_desc,
      const ExecutionContext &ctx, Tensor *output) {
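    // Allocate the output tensor with the size required by the primitive's
    // destination descriptor and wrap the buffer in a oneDNN memory.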
    auto dst_desc = mul_prim_desc.dst_desc();
    auto buffer_size = dst_desc.get_size();

    OT *output_data = output->mutable_data<OT>(ctx.GetPlace(), buffer_size);
    output->set_format(paddle::platform::GetMKLDNNFormat(dst_desc));
    return memory(dst_desc, engine_, to_void_cast<OT>(output_data));
  }

  memory Reorder(const memory::desc &src_desc, const memory::desc &dst_desc,
                 void *src_data, void *dst_data = NULL) {
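    // Plain layout conversion from src_desc to dst_desc; when dst_data is
    // null the destination buffer is allocated by oneDNN.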
    auto src_mem = memory(src_desc, engine_, src_data);
    auto dst_mem = dst_data ? memory(dst_desc, engine_, dst_data)
                            : memory(dst_desc, engine_);

    auto reorder = dnnl::reorder(src_mem, dst_mem);

    auto &astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder.execute(astream, src_mem, dst_mem);
      astream.wait();
    }

    return dst_mem;
  }

  memory TransposeInputY(const Tensor *input_y) {
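    // inner_product expects the weights in oi order, while Y arrives as a
    // row-major (io) matrix, so reorder it with swapped dimensions.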
    auto dims = framework::vectorize<int64_t>(input_y->dims());
    std::swap(dims[0], dims[1]);  // swap to the oneDNN (oc, ic) weights order
    auto src_desc = CreateMemDescriptor<YT>(dims, MKLDNNMemoryFormat::io);
    auto dst_desc = CreateMemDescriptor<YT>(dims, MKLDNNMemoryFormat::oi);
    return Reorder(src_desc, dst_desc, to_void_cast<YT>(input_y->data<YT>()));
  }

  const dnnl::engine &engine_;
  paddle::optional<memory> x_input_;
  paddle::optional<memory> y_input_;
  paddle::optional<memory> output_;
  paddle::optional<inner_product_forward> mul_;
  static constexpr bool is_int8_ =
      std::is_same<XT, int8_t>::value || std::is_same<XT, uint8_t>::value;
};

/* OT: output data type */
template <typename XT, typename YT, typename OT>
std::shared_ptr<MulPrimitiveFactory<XT, YT, OT>> GetPrimitiveFactory(
    const MKLDNNDeviceContext &dev_ctx, const ExecutionContext &ctx,
    const Tensor *input_x, const Tensor *input_y,
    const dnnl::engine &mkldnn_engine) {
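  // Look up a cached factory in the device context under a key derived from
  // the input types/shapes and the output name; create and cache a new one
  // on a miss.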
  std::string key = platform::CreateKey(
      dev_ctx, input_x->type(), framework::vectorize(input_x->dims()),
      input_y->type(), framework::vectorize(input_y->dims()),
      ctx.OutputName("Out"));
  key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

  auto prim_creator = std::static_pointer_cast<MulPrimitiveFactory<XT, YT, OT>>(
      dev_ctx.GetBlob(key));

  if (prim_creator == nullptr) {
    prim_creator =
        std::make_shared<MulPrimitiveFactory<XT, YT, OT>>(mkldnn_engine);
    dev_ctx.SetBlob(key, prim_creator);
  }

  return prim_creator;
}

template <typename XT, typename YT>
inner_product_forward GetMulPrimitive(const MKLDNNDeviceContext &dev_ctx,
                                      const ExecutionContext &ctx,
                                      const Tensor *input_x,
                                      const Tensor *input_y, Tensor *output,
                                      const dnnl::engine &mkldnn_engine) {
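  // Pick the output element type: int8 kernels produce int8 output unless
  // force_fp32_output is set, in which case (and for non-int8 inputs) the
  // output is fp32.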
  constexpr bool is_int8 =
      std::is_same<XT, int8_t>::value || std::is_same<XT, uint8_t>::value;
  bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

  if (is_int8 && !force_fp32_output) {
    return GetPrimitiveFactory<XT, YT, int8_t>(dev_ctx, ctx, input_x, input_y,
                                               mkldnn_engine)
        ->CreateMulPrimitive(input_x, input_y, output, ctx);

  } else {
    return GetPrimitiveFactory<XT, YT, float>(dev_ctx, ctx, input_x, input_y,
                                              mkldnn_engine)
        ->CreateMulPrimitive(input_x, input_y, output, ctx);
  }
}

/* XT: input x data type, YT: input y data type */
template <typename XT, typename YT>
class MulMKLDNNKernel : public framework::OpKernel<XT> {
 public:
  void Compute(const ExecutionContext &ctx) const override {
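    // Build (or fetch) the cached primitive, run it, and annotate the output
    // tensor with the MKL-DNN layout and a plain format matching its rank.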
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "Operator DNNL Mul must use CPUPlace"));
    platform::MKLDNNDeviceContext::tls().log_lib_version();
    auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    auto &mkldnn_engine = dev_ctx.GetEngine();

    const Tensor *x = ctx.Input<Tensor>("X");
    const Tensor *y = ctx.Input<Tensor>("Y");
    Tensor *out = ctx.Output<Tensor>("Out");
    auto out_dims = out->dims();

    auto mul = GetMulPrimitive<XT, YT>(dev_ctx, ctx, x, y, out, mkldnn_engine);

    if (out_dims.size() != 2) {
      out->Resize(out_dims);
    }
    out->set_layout(DataLayout::kMKLDNN);
    out->set_format(platform::MKLDNNFormatForSize(out_dims.size(),
                                                  MKLDNNMemoryFormat::nchw));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(mul, MKLDNN, ::paddle::platform::CPUPlace,
                                    U8, ops::kMULMKLDNNINT8,
                                    ops::MulMKLDNNKernel<uint8_t, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(mul, MKLDNN, ::paddle::platform::CPUPlace,
                                    S8, ops::kMULMKLDNNINT8,
                                    ops::MulMKLDNNKernel<int8_t, float>);

REGISTER_OP_KERNEL(mul, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::MulMKLDNNKernel<uint8_t, float>);