conv_mkldnn_op.cc 45.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

15
#include <unordered_map>
Y
Yu Yang 已提交
16 17
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/memory/malloc.h"
18
#include "paddle/fluid/operators/conv_op.h"
J
Jacek Czaja 已提交
19
#include "paddle/fluid/platform/mkldnn_reuse.h"
20 21 22 23

namespace paddle {
namespace operators {

24 25 26 27 28 29 30 31
using framework::DataLayout;
using mkldnn::memory;
using mkldnn::primitive;
using mkldnn::reorder;
using mkldnn::stream;
using platform::to_void_cast;
using platform::GetMKLDNNFormat;

A
Adam 已提交
32 33
inline void GetWeightsTz(std::vector<int64_t>& weights_tz,  // NOLINT
                         int groups, bool is_conv3d) {
Y
Yihua Xu 已提交
34
  if (groups > 1) {
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
    if (is_conv3d) {
      int output = weights_tz[0];
      int input = weights_tz[1];
      int dimension = weights_tz[2];
      int height = weights_tz[3];
      int width = weights_tz[4];
      weights_tz.resize(6);
      weights_tz[0] = groups;
      weights_tz[1] = output / groups;
      weights_tz[2] = input;
      weights_tz[3] = dimension;
      weights_tz[4] = height;
      weights_tz[5] = width;
    } else {
      int output = weights_tz[0];
      int input = weights_tz[1];
      int height = weights_tz[2];
      int width = weights_tz[3];
      weights_tz.resize(5);
      weights_tz[0] = groups;
      weights_tz[1] = output / groups;
      weights_tz[2] = input;
      weights_tz[3] = height;
      weights_tz[4] = width;
    }
Y
Yihua Xu 已提交
60 61 62
  }
}

63 64
inline MKLDNNMemoryFormat GetWeightsFormat(MKLDNNMemoryFormat format,
                                           int groups, bool is_conv3d) {
Y
Yihua Xu 已提交
65
  if (is_conv3d) {
66
    return (groups == 1) ? format : MKLDNNMemoryFormat::goidhw;
Y
Yihua Xu 已提交
67
  } else {
68
    return (groups == 1) ? format : MKLDNNMemoryFormat::goihw;
Y
Yihua Xu 已提交
69 70 71
  }
}

72 73
static mkldnn::memory::data_type GetDstType(bool is_int8,
                                            bool force_fp32_output,
74
                                            std::string fuse_activation,
75 76 77
                                            bool fuse_residual_conn,
                                            const Tensor* residual_param) {
  auto dst_dt = mkldnn::memory::data_type::f32;  // uint8_t, int8_t, float
78 79 80 81 82 83 84
  if (is_int8) {
    dst_dt = (fuse_activation == "relu" || fuse_activation == "relu6")
                 ? mkldnn::memory::data_type::u8
                 : mkldnn::memory::data_type::s8;
    if (force_fp32_output) {
      dst_dt = mkldnn::memory::data_type::f32;
    }
85 86
    if (fuse_residual_conn && residual_param) {
      auto residual_dt = framework::ToMKLDNNDataType(residual_param->type());
87
      if (dst_dt != residual_dt) dst_dt = residual_dt;
88 89 90 91 92
    }
  }
  return dst_dt;
}

93
template <typename T, typename K>
94
class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
95 96 97
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
F
FDInSky 已提交
98
                   platform::errors::InvalidArgument("It must use CPUPlace."));
99 100 101 102 103
    bool is_INT8 =
        std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
    if (!is_INT8) {
      ComputeFP32(ctx);
    } else {
104
      std::string fuse_activation = ctx.Attr<std::string>("fuse_activation");
105 106 107
      bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
      bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
      auto residual_param = ctx.Input<Tensor>("ResidualData");
108
      auto dst_dt = GetDstType(true, force_fp32_output, fuse_activation,
109 110 111 112 113 114 115 116
                               fuse_residual_conn, residual_param);
      if (dst_dt == mkldnn::memory::data_type::f32) {
        ComputeINT8<float>(ctx);
      } else if (dst_dt == mkldnn::memory::data_type::u8) {
        ComputeINT8<uint8_t>(ctx);
      } else if (dst_dt == mkldnn::memory::data_type::s8) {
        ComputeINT8<int8_t>(ctx);
      }
117 118
    }
  }
119

120
  void ComputeFP32(const paddle::framework::ExecutionContext& ctx) const {
K
Krzysztof Binias 已提交
121 122
    const bool is_test = ctx.Attr<bool>("is_test");

123 124
    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
125 126 127 128
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    auto* input = ctx.Input<Tensor>("Input");
    auto* filter = ctx.Input<Tensor>("Filter");
129
    auto* bias = ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;
130 131
    auto* output = ctx.Output<Tensor>("Output");

132
    PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
F
FDInSky 已提交
133 134 135 136 137 138
                      platform::errors::InvalidArgument(
                          "The input tensor's layout should be %d, but got %d.",
                          DataLayout::kMKLDNN, input->layout()));
    PADDLE_ENFORCE_NE(
        input->format(), MKLDNNMemoryFormat::undef,
        platform::errors::InvalidArgument("Wrong format set for Input tensor"));
139

F
FDInSky 已提交
140 141 142 143 144
    PADDLE_ENFORCE_EQ(
        filter->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument(
            "The Filter tensor's layout should be %d, but got %d.",
            DataLayout::kMKLDNN, filter->layout()));
A
Adam 已提交
145
    PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169
                      platform::errors::InvalidArgument(
                          "Wrong format set for Filter tensor"));

    PADDLE_ENFORCE_GE(input->dims().size(), 4,
                      platform::errors::InvalidArgument(
                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
                          "NCDHW, but got dimension = %d .",
                          input->dims().size()));
    PADDLE_ENFORCE_LE(input->dims().size(), 5,
                      platform::errors::InvalidArgument(
                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
                          "NCDHW, but got dimension = %d .",
                          input->dims().size()));

    PADDLE_ENFORCE_GE(filter->dims().size(), 4,
                      platform::errors::InvalidArgument(
                          "Filter must be with 4 or 5 dimensions, i.e. OIHW or "
                          "OIDHW, but got dimension = %d .",
                          filter->dims().size()));
    PADDLE_ENFORCE_LE(filter->dims().size(), 5,
                      platform::errors::InvalidArgument(
                          "Filter must be with 4 or 5 dimensions, i.e. OIHW or "
                          "OIDHW, but got dimension = %d .",
                          filter->dims().size()));
170

171
    if (bias) {
F
FDInSky 已提交
172 173 174 175 176
      PADDLE_ENFORCE_EQ(
          bias->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument(
              "The Bias tensor's layout should be %d, but got %d.",
              DataLayout::kMKLDNN, bias->layout()));
A
Adam 已提交
177
      PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
178 179
                        platform::errors::InvalidArgument(
                            "Got wrong format for Bias tensor."));
180

F
FDInSky 已提交
181 182 183 184 185
      PADDLE_ENFORCE_EQ(
          bias->dims().size(), 1,
          platform::errors::InvalidArgument("Bias must only have 1 dimension, "
                                            "i.e. X, but got dimension = %d .",
                                            bias->dims().size()));
186
    }
187

A
Adam 已提交
188 189 190 191 192 193 194 195 196
    std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
    std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));

    std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
    std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));

    std::vector<int> dilations_temp = ctx.Attr<std::vector<int>>("dilations");
    std::vector<int64_t> dilations(begin(dilations_temp), end(dilations_temp));

197 198 199
    std::string fuse_activation = ctx.Attr<std::string>("fuse_activation");
    float fuse_alpha = ctx.Attr<float>("fuse_alpha");
    float fuse_beta = ctx.Attr<float>("fuse_beta");
200
    bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
201
    int groups = ctx.Attr<int>("groups");
202
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
203
    bool is_conv3d = strides.size() == 3U;
204

205 206 207 208 209 210
    auto input_dims = input->dims();
    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
    auto filter_dims = filter->dims();
    auto filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());

A
Adam 已提交
211
    auto ksize = framework::vectorize(filter_data_dims);
212 213 214 215

    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             data_dims, strides, ksize);

A
Adam 已提交
216 217
    std::vector<primitive> pipeline;

218
    PADDLE_ENFORCE(
219 220 221 222
        is_conv3d
            ? dilations.size() == 3 && dilations[0] == 1 && dilations[1] == 1 &&
                  dilations[2] == 1
            : dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
223 224 225 226 227
        "dilation in convolution is not implemented yet");

    const T* input_data = input->data<T>();
    const T* filter_data = filter->data<T>();

A
Adam 已提交
228 229
    auto src_tz = paddle::framework::vectorize(input->dims());
    auto weights_tz = paddle::framework::vectorize(filter->dims());
230
    int g = std::max(groups, 1);
A
Adam 已提交
231

232
    GetWeightsTz(weights_tz, g, is_conv3d);
A
Adam 已提交
233 234

    auto dst_tz = paddle::framework::vectorize(output->dims());
235

236
    // Get unique name for storing MKLDNN primitives
237
    const std::string key = platform::CreateKey(
H
hong 已提交
238
        src_tz, ctx.InputName("Input") + ctx.InputName("Filter"));
239

240
    auto src_format = input->format();
241
    MKLDNNMemoryFormat weights_format =
242 243 244 245 246 247
        GetWeightsFormat(filter->format(), g, is_conv3d);

    auto user_src_md = platform::MKLDNNMemDesc(
        {src_tz}, platform::MKLDNNGetDataType<T>(), src_format);
    auto user_weights_md = platform::MKLDNNMemDesc(
        {weights_tz}, platform::MKLDNNGetDataType<T>(), weights_format);
248 249 250 251 252

    /* create memory descriptor for convolution without specified format
     * ('any') which lets a primitive (convolution in this case) choose
     * the memory format preferred for best performance
     */
253 254 255 256
    // TODO(jczaja): This is workaround to make grad op UT's numerical
    // gradient computation proper as this op is called directly without
    // fetch op following it , so numercial grad is computed (in python)
    // using block formats which will give wrong results
257 258
    std::string data_format = ctx.Attr<std::string>("data_format");
    auto chosen_memory_format =
259 260
        is_test ? MKLDNNMemoryFormat::any
                : platform::data_format_to_memory_format(data_format);
261

262
    weights_format = MKLDNNMemoryFormat::any;
263
    // Check the format for user's special output
264
    if (chosen_memory_format != MKLDNNMemoryFormat::any) {
265 266 267 268
      if (is_conv3d) {
        chosen_memory_format =
            platform::MKLDNNFormatForSize(src_tz.size(), chosen_memory_format);
      }
269 270
    }

271
    auto src_md = platform::MKLDNNMemDesc(
272
        src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
273
    auto weights_md = platform::MKLDNNMemDesc(
274
        weights_tz, platform::MKLDNNGetDataType<T>(), weights_format);
A
Adam 已提交
275
    std::vector<int64_t> bias_tz;
276
    auto dst_md = platform::MKLDNNMemDesc(
277
        dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
278

279 280
    platform::ConvMKLDNNHandler handler(dev_ctx, mkldnn_engine, key);

281
    // create a conv primitive descriptor and save it for usage in backward
282
    std::shared_ptr<mkldnn::convolution_forward::primitive_desc> conv_pd;
283 284
    auto fwd_prop_kind = is_test ? mkldnn::prop_kind::forward_inference
                                 : mkldnn::prop_kind::forward_training;
285
    if (bias) {
A
Adam 已提交
286
      bias_tz = paddle::framework::vectorize(bias->dims());
287
      auto bias_md = platform::MKLDNNMemDesc(
288
          bias_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
289
      conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(
290
          src_md, weights_md, bias_md, dst_md, strides, paddings, mkldnn_engine,
291
          fuse_activation, fuse_alpha, fuse_beta, fuse_residual_conn,
292
          fwd_prop_kind);
293
    } else {
294 295
      conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(
          src_md, weights_md, boost::none, dst_md, strides, paddings,
296 297
          mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
          fuse_residual_conn, fwd_prop_kind);
298
    }
299

300
    // create mkldnn memory from input tensors (data/weights)
301 302
    auto user_src_memory_p =
        handler.AcquireSrcMemory(user_src_md, to_void_cast<T>(input_data));
303
    auto user_weights_memory_p = handler.AcquireWeightsMemory(
304
        user_weights_md, to_void_cast<T>(filter_data));
305

306 307 308 309 310
    // create reorder primitive if the input format is not the preferred one
    auto src_memory_p =
        handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline);
    auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive(
        user_weights_memory_p, pipeline, is_test);
311

312
    std::shared_ptr<mkldnn::memory> dst_memory_p, user_residual_memory_p;
313

314
    if (fuse_residual_conn) {
315 316
      auto residual_param = ctx.Input<Tensor>("ResidualData");
      auto residual_param_data = residual_param->data<T>();
317

318 319
      PADDLE_ENFORCE_NE(
          residual_param_data, nullptr,
F
FDInSky 已提交
320 321 322 323 324 325 326 327 328 329
          platform::errors::InvalidArgument(
              "Provide data if you want MKLDNN conv+elementwise_add fusion"));
      PADDLE_ENFORCE_EQ(
          output->dims(), residual_param->dims(),
          platform::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, "
              "but got output's dimension = %d and residual param's dimension "
              "= %d .",
              output->dims().size(), residual_param->dims().size()));
330

331
      if (residual_param->format() != handler.GetDstFormat()) {
332 333
        auto output_data =
            output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
334
        auto residual_data_tz =
A
Adam 已提交
335
            paddle::framework::vectorize(residual_param->dims());
336 337 338 339 340
        auto residual_data_type =
            paddle::framework::ToMKLDNNDataType(residual_param->type());

        auto user_residual_md = platform::MKLDNNMemDesc(
            residual_data_tz, residual_data_type, residual_param->format());
341
        user_residual_memory_p = handler.AcquireResidualDataMemory(
342
            user_residual_md, to_void_cast<T>(residual_param_data));
343 344 345

        dst_memory_p = handler.AcquireDstMemoryFromResidualDataMemory(
            user_residual_memory_p, to_void_cast<T>(output_data), pipeline);
346
      } else {
347 348 349 350 351
        // Changing ShareDataWith to TensorCopy results in performance drop
        // on ResNet architectures
        // (https://github.com/PaddlePaddle/Paddle/issues/22964)
        output->ShareDataWith(*residual_param);
        auto output_data = output->mutable_data<T>(ctx.GetPlace());
352 353
        dst_memory_p =
            handler.AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
354
      }
355
    } else {
356 357
      auto output_data =
          output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
358 359
      dst_memory_p =
          handler.AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
360
    }
361

A
Adam 已提交
362 363 364
    auto conv_p = handler.AcquireConvolution();

    mkldnn::stream astream(mkldnn_engine);
365 366 367
    if (bias) {
      const T* bias_data = bias->data<T>();
      auto user_bias_md = platform::MKLDNNMemDesc(
368
          {bias_tz}, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
A
Adam 已提交
369
      auto user_bias_memory_p =
370 371
          handler.AcquireBiasMemory(user_bias_md, to_void_cast<T>(bias_data));

A
Adam 已提交
372
      auto bias_memory_p =
373
          handler.AcquireBiasMemoryFromPrimitive(user_bias_memory_p, pipeline);
A
Adam 已提交
374 375 376 377 378 379

      conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                {MKLDNN_ARG_BIAS, *bias_memory_p},
                                {MKLDNN_ARG_DST, *dst_memory_p}});

380
    } else {
A
Adam 已提交
381 382 383
      conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                {MKLDNN_ARG_DST, *dst_memory_p}});
384
    }
A
Adam 已提交
385
    astream.wait();
386

387 388
    output->set_layout(DataLayout::kMKLDNN);
    output->set_format(GetMKLDNNFormat(*dst_memory_p));
389
  }
390
  template <typename T_out>
391 392 393 394 395 396 397 398 399 400
  void ComputeINT8(const paddle::framework::ExecutionContext& ctx) const {
    const bool is_test = ctx.Attr<bool>("is_test");

    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    auto* input = ctx.Input<Tensor>("Input");
    auto* output = ctx.Output<Tensor>("Output");

401
    PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
F
FDInSky 已提交
402 403 404
                      platform::errors::InvalidArgument(
                          "The input tensor's layout should be %d, but got %d.",
                          DataLayout::kMKLDNN, input->layout()));
A
Adam 已提交
405
    PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
406 407 408 409 410 411 412 413 414 415 416 417 418
                      platform::errors::InvalidArgument(
                          "Got wrong format for Input tensor."));

    PADDLE_ENFORCE_GE(input->dims().size(), 4,
                      platform::errors::InvalidArgument(
                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
                          "NCDHW, but got dimension = %d .",
                          input->dims().size()));
    PADDLE_ENFORCE_LE(input->dims().size(), 5,
                      platform::errors::InvalidArgument(
                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
                          "NCDHW, but got dimension = %d .",
                          input->dims().size()));
419

420
    std::string fuse_activation = ctx.Attr<std::string>("fuse_activation");
X
xiaolil1 已提交
421
    bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
422 423
    bool unsigned_output =
        (fuse_activation == "relu" || fuse_activation == "relu6");
424

425 426
    const T* input_data = input->data<T>();

A
Adam 已提交
427
    auto src_tz = paddle::framework::vectorize(input->dims());
428

X
xiaolil1 已提交
429 430
    mkldnn::memory::data_type src_dt =
        paddle::framework::ToMKLDNNDataType(input->type());
431

L
lidanqing 已提交
432
    std::string key = platform::CreateKey(
H
hong 已提交
433
        src_tz, src_dt, ctx.InputName("Input") + ctx.InputName("Filter"));
434

435 436
    const std::string key_conv_pd = key + "@conv_pd";
    bool need_s8_to_u8 = false;
437 438 439
    std::shared_ptr<mkldnn::convolution_forward> conv_p;
    std::shared_ptr<mkldnn::memory> src_memory_p;
    std::shared_ptr<mkldnn::memory> user_src_memory_p;
440
    std::shared_ptr<mkldnn::memory> dst_memory_p;
441
    std::vector<primitive> pipeline;
442
    std::shared_ptr<mkldnn::convolution_forward::primitive_desc> conv_pd;
443 444 445 446 447 448 449 450 451
    std::shared_ptr<platform::ConvMKLDNNHandler> handler;

    // This is workaround for hacky implementation
    // of conv int8 mkl-dnn. Once conv fp32 and conv int8
    // are merged/unified, this will disappear
    std::string key_tid = "";
    if (platform::get_cur_mkldnn_session_id() ==
        platform::kMKLDNNSessionID_Default) {
      key_tid = "-t:" + platform::ThreadIDasStr();
L
lidanqing 已提交
452
    }
453

454 455 456
    auto prim_key = key + key_tid + "@conv_p";
    auto dst_key = key + key_tid + "@dst_mem_p";
    auto src_key = key + key_tid + "@src_mem_p";
A
Adam 已提交
457 458
    auto weights_key = key + key_tid + "@weights_mem_p";
    auto bias_key = key + key_tid + "@bias_mem_p";
459
    auto user_src_key = key + key_tid + "@user_src_mem_p";
A
Adam 已提交
460
    auto user_residual_key = key + key_tid + "@user_residual_data_mem_p";
461 462 463 464 465 466
    auto src_reorder_key = key + key_tid + "@src_mem_preorder_p";
    auto residual_reorder_key = key + key_tid + "@residual_data_mem_preorder_p";

    conv_p = std::static_pointer_cast<mkldnn::convolution_forward>(
        dev_ctx.GetBlob(prim_key));

A
Adam 已提交
467 468
    mkldnn::stream astream(mkldnn_engine);

469
    if (conv_p == nullptr || !is_test) {
470 471 472 473 474 475
      float fuse_alpha = ctx.Attr<float>("fuse_alpha");
      float fuse_beta = ctx.Attr<float>("fuse_beta");
      bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");

      auto* filter = ctx.Input<Tensor>("Filter");

F
FDInSky 已提交
476 477 478 479 480
      PADDLE_ENFORCE_EQ(
          filter->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument(
              "The filter tensor's layout should be %d, but got %d.",
              DataLayout::kMKLDNN, filter->layout()));
A
Adam 已提交
481
      PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
482 483 484 485 486 487 488 489 490 491 492 493 494
                        platform::errors::InvalidArgument(
                            "Got wrong format for Filter tensor."));

      PADDLE_ENFORCE_GE(filter->dims().size(), 4,
                        platform::errors::InvalidArgument(
                            "Filter must be with 4 or 5 dimensions, i.e. OIHW "
                            "or OIDHW, but got dimensions = %d .",
                            filter->dims().size()));
      PADDLE_ENFORCE_LE(filter->dims().size(), 5,
                        platform::errors::InvalidArgument(
                            "Filter must be with 4 or 5 dimensions, i.e. OIHW "
                            "or OIDHW, but got dimensions = %d .",
                            filter->dims().size()));
495 496 497 498 499 500 501 502

      PADDLE_ENFORCE_EQ(
          !fuse_residual_conn || !force_fp32_output, true,
          "residual fusion does not support force output with fp32");

      auto* bias = ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;

      if (bias) {
F
FDInSky 已提交
503 504 505 506 507
        PADDLE_ENFORCE_EQ(
            bias->layout(), DataLayout::kMKLDNN,
            platform::errors::InvalidArgument(
                "The bias tensor's layout should be %d, but got %d.",
                DataLayout::kMKLDNN, bias->layout()));
A
Adam 已提交
508
        PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
509 510
                          platform::errors::InvalidArgument(
                              "Got wrong format for Bias tensor."));
511 512

        PADDLE_ENFORCE_EQ(bias->dims().size(), 1,
F
FDInSky 已提交
513 514 515 516
                          platform::errors::InvalidArgument(
                              "Bias must only have 1 dimension, i.e. X, but "
                              "got dimension = %d .",
                              bias->dims().size()));
517 518
      }

A
Adam 已提交
519 520 521 522 523 524 525 526 527 528
      std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
      std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));

      std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
      std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));

      std::vector<int> dilations_temp = ctx.Attr<std::vector<int>>("dilations");
      std::vector<int64_t> dilations(begin(dilations_temp),
                                     end(dilations_temp));

529 530
      std::string padding_algorithm =
          ctx.Attr<std::string>("padding_algorithm");
531 532 533 534

      bool is_conv3d = strides.size() == 3U;

      PADDLE_ENFORCE_NE(is_conv3d, true,
F
FDInSky 已提交
535 536 537
                        platform::errors::InvalidArgument(
                            "int8 does not support conv3d currently, should "
                            "set param is_conv3d as False"));
538

539 540 541 542 543 544
      auto input_dims = input->dims();
      auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
      auto filter_dims = filter->dims();
      auto filter_data_dims =
          framework::slice_ddim(filter_dims, 2, filter_dims.size());

A
Adam 已提交
545
      auto ksize = framework::vectorize(filter_data_dims);
546 547 548 549

      UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                               data_dims, strides, ksize);

550
      int groups = ctx.Attr<int>("groups");
A
Adam 已提交
551
      auto weights_tz = paddle::framework::vectorize(filter->dims());
552 553 554
      int g = std::max(groups, 1);

      GetWeightsTz(weights_tz, g, is_conv3d);
A
Adam 已提交
555
      auto dst_tz = paddle::framework::vectorize(output->dims());
556 557 558 559 560 561 562 563

      PADDLE_ENFORCE_EQ(
          is_conv3d
              ? dilations.size() == 3 && dilations[0] == 1 &&
                    dilations[1] == 1 && dilations[2] == 1
              : dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
          true, "dilation in convolution is not implemented yet");

564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591
      const K* filter_data = filter->data<K>();
      auto scale_in_data = ctx.Attr<float>("Scale_in");
      auto scale_in_eltwise_data = ctx.Attr<float>("Scale_in_eltwise");
      auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
      auto scale_out_data =
          force_fp32_output ? 1.0f : ctx.Attr<float>("Scale_out");
      float sum_scale =
          fuse_residual_conn ? scale_out_data / scale_in_eltwise_data : 1.0f;

      bool is_multi_channel = scale_weights_data.size() > 1;

      int count = is_multi_channel ? (g > 1 ? (weights_tz)[1] * (weights_tz)[0]
                                            : (weights_tz)[0])
                                   : 1;
      std::vector<float> output_shift_scale(count);
#pragma omp parallel for if (count > 1)
      for (int i = 0; i < count; i++) {
        if (scale_weights_data[i] == 0.0)
          output_shift_scale[i] =
              scale_out_data;  // weights data will contain 0
                               // in some models, then weights
                               // scale couldn't be calculated
        else
          output_shift_scale[i] =
              static_cast<float>(static_cast<double>(scale_out_data) /
                                 (static_cast<double>(scale_in_data) *
                                  static_cast<double>(scale_weights_data[i])));
      }
L
lidanqing 已提交
592

593 594 595 596 597 598 599 600 601 602
      auto user_src_md =
          platform::MKLDNNMemDesc({src_tz}, src_dt, input->format());
      auto user_weights_md = platform::MKLDNNMemDesc(
          {weights_tz}, platform::MKLDNNGetDataType<K>(),
          ((g) == 1) ? MKLDNNMemoryFormat::oihw : MKLDNNMemoryFormat::goihw);

      /* create memory descriptor for convolution without specified format
      * ('any') which lets a primitive (convolution in this case) choose
      * the memory format preferred for best performance
      */
603
      auto chosen_memory_format = MKLDNNMemoryFormat::any;
604

A
Adam 已提交
605
      std::vector<int64_t> bias_tz;
606 607 608 609 610 611 612 613 614 615 616 617 618

      auto src_md =
          platform::MKLDNNMemDesc(src_tz, src_dt, chosen_memory_format);
      auto weights_md = platform::MKLDNNMemDesc(
          weights_tz, memory::data_type::s8, chosen_memory_format);
      auto dst_md = platform::MKLDNNMemDesc(
          dst_tz, platform::MKLDNNGetDataType<T_out>(), chosen_memory_format);

      handler.reset(
          new platform::ConvMKLDNNHandler(dev_ctx, mkldnn_engine, key));
      // create a conv primitive descriptor and save it for usage in backward
      auto propagation = is_test ? mkldnn::prop_kind::forward_scoring
                                 : mkldnn::prop_kind::forward_training;
L
lidanqing 已提交
619

620
      if (bias) {
A
Adam 已提交
621
        bias_tz = paddle::framework::vectorize(bias->dims());
622 623 624 625 626 627 628 629 630 631 632 633
        auto bias_md = platform::MKLDNNMemDesc(bias_tz, memory::data_type::s32,
                                               MKLDNNMemoryFormat::x);
        conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
            src_md, weights_md, bias_md, dst_md, strides, paddings,
            mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
            fuse_residual_conn, propagation, output_shift_scale, sum_scale);
      } else {
        conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
            src_md, weights_md, boost::none, dst_md, strides, paddings,
            mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
            fuse_residual_conn, propagation, output_shift_scale, sum_scale);
      }
L
lidanqing 已提交
634

635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653
      // create mkldnn memory from input tensors (data/weights)
      user_src_memory_p =
          handler->AcquireSrcMemory(user_src_md, to_void_cast<T>(input_data));
      auto user_weights_memory_p = handler->AcquireWeightsMemory(
          user_weights_md, to_void_cast<K>(filter_data));

      // create reorder primitive if the input format is not the preferred one
      src_memory_p =
          handler->AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline);

      std::shared_ptr<mkldnn::memory> weights_memory_p;
      int mask_reorder =
          is_multi_channel ? ((g != 1) ? (1 << 1) + (1 << 0) : 1 << 0) : 0;
      weights_memory_p = handler->AcquireWeightsMemoryFromPrimitive(
          user_weights_memory_p, pipeline, is_test, true, scale_weights_data,
          mask_reorder);

      if (fuse_residual_conn) {
        auto residual_param = ctx.Input<Tensor>("ResidualData");
F
FDInSky 已提交
654 655 656 657 658 659 660
        PADDLE_ENFORCE_EQ(
            output->dims(), residual_param->dims(),
            platform::errors::InvalidArgument(
                "Output and elementwise parameter need to have the "
                "same dimension sizes, but got output's dimension = %d"
                " and residual param's dimension =%d .",
                output->dims().size(), residual_param->dims().size()));
661 662 663 664
        auto residual_dt =
            paddle::framework::ToMKLDNNDataType(residual_param->type());
        if (residual_param->format() != handler->GetDstFormat()) {
          auto residual_data_tz =
A
Adam 已提交
665
              paddle::framework::vectorize(residual_param->dims());
666 667 668 669 670 671
          auto user_residual_md = platform::MKLDNNMemDesc(
              residual_data_tz, residual_dt, residual_param->format());
          dst_memory_p = platform::SetDstMemory<T_out>(
              ctx, output, residual_param, user_residual_md, handler,
              &pipeline);
        } else {
672
          output->ShareDataWith(*residual_param);
673 674 675 676 677 678 679 680
          dst_memory_p = platform::SetDstMemory<T_out>(ctx, output, handler);
        }
        need_s8_to_u8 =
            (platform::MKLDNNGetDataType<T_out>() == memory::data_type::s8) &&
            unsigned_output;
      } else {
        dst_memory_p = platform::SetDstMemory<T_out>(ctx, output, handler);
      }
L
lidanqing 已提交
681

682 683
      // create convolution op primitive
      auto scale_bias_key = key + "@scale_bias";
A
Adam 已提交
684
      conv_p = handler->AcquireConvolution();
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704
      if (bias) {
        const K* bias_data = bias->data<K>();
        auto user_bias_md = platform::MKLDNNMemDesc(
            {bias_tz}, platform::MKLDNNGetDataType<K>(), MKLDNNMemoryFormat::x);
        auto user_bias_memory_p = handler->AcquireBiasMemory(
            user_bias_md, to_void_cast<K>(bias_data));
        std::shared_ptr<mkldnn::memory> bias_memory_p;
        int mask_reorder = is_multi_channel ? 1 << 0 : 1;
        int count =
            is_multi_channel
                ? (g > 1 ? (weights_tz)[1] * (weights_tz)[0] : (weights_tz)[0])
                : 1;
        std::vector<float> scale_bias_data(count);
#pragma omp parallel for if (count > 1)
        for (int i = 0; i < count; i++) {
          scale_bias_data[i] = scale_in_data * scale_weights_data[i];
        }
        bias_memory_p = handler->AcquireBiasMemoryFromPrimitive(
            user_bias_memory_p, pipeline, is_test, true, scale_bias_data,
            mask_reorder);
A
Adam 已提交
705 706 707 708
        conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                  {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                  {MKLDNN_ARG_BIAS, *bias_memory_p},
                                  {MKLDNN_ARG_DST, *dst_memory_p}});
709
      } else {
A
Adam 已提交
710 711 712
        conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                  {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                  {MKLDNN_ARG_DST, *dst_memory_p}});
713 714
      }
    } else {
A
Adam 已提交
715
      auto src_memory_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
716 717 718 719 720 721 722
          dev_ctx.GetBlob(src_reorder_key));
      src_memory_p =
          std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(src_key));
      if (src_memory_reorder_p) {
        user_src_memory_p = std::static_pointer_cast<mkldnn::memory>(
            dev_ctx.GetBlob(user_src_key));
        user_src_memory_p->set_data_handle(to_void_cast<T>(input_data));
A
Adam 已提交
723 724 725
        src_memory_reorder_p->execute(astream, *user_src_memory_p,
                                      *src_memory_p);
        astream.wait();
726 727 728
      } else if (src_memory_p) {
        src_memory_p->set_data_handle(to_void_cast<T>(input_data));
      }
A
Adam 已提交
729 730
      auto weights_memory_p = std::static_pointer_cast<mkldnn::memory>(
          dev_ctx.GetBlob(weights_key));
731 732 733 734 735 736 737 738 739
      dst_memory_p =
          std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(dst_key));
      conv_pd =
          std::static_pointer_cast<mkldnn::convolution_forward::primitive_desc>(
              dev_ctx.GetBlob(key_conv_pd));
      if (conv_pd) {
        handler.reset(new platform::ConvMKLDNNHandler(conv_pd, dev_ctx,
                                                      mkldnn_engine, key));
      }
L
lidanqing 已提交
740

741 742
      if (fuse_residual_conn) {
        auto residual_param = ctx.Input<Tensor>("ResidualData");
743
        output->ShareDataWith(*residual_param);
744 745 746
        need_s8_to_u8 =
            (platform::MKLDNNGetDataType<T_out>() == memory::data_type::s8) &&
            unsigned_output;
X
xiaolil1 已提交
747
      }
748
      platform::SetDstMemoryHandler<T_out>(ctx, output, handler, dst_memory_p);
L
lidanqing 已提交
749

A
Adam 已提交
750
      auto residual_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
751 752
          dev_ctx.GetBlob(residual_reorder_key));
      if (residual_reorder_p) {
A
Adam 已提交
753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771
        auto user_residual_data_p = std::static_pointer_cast<mkldnn::memory>(
            dev_ctx.GetBlob(user_residual_key));
        residual_reorder_p->execute(astream, *user_residual_data_p,
                                    *dst_memory_p);
        astream.wait();
      }

      auto bias_memory_p =
          std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(bias_key));

      if (bias_memory_p) {
        conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                  {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                  {MKLDNN_ARG_BIAS, *bias_memory_p},
                                  {MKLDNN_ARG_DST, *dst_memory_p}});
      } else {
        conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                                  {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                  {MKLDNN_ARG_DST, *dst_memory_p}});
772 773
      }
    }
A
Adam 已提交
774
    astream.wait();
775
    if (need_s8_to_u8) {
X
xiaolil1 已提交
776 777
      output->mutable_data<uint8_t>(ctx.GetPlace());
    }
778 779 780
    output->set_layout(DataLayout::kMKLDNN);
    output->set_format(GetMKLDNNFormat(*dst_memory_p));
  }
781 782 783
};

template <typename T>
784
class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
785 786 787
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
F
FDInSky 已提交
788
                   platform::errors::InvalidArgument("It must use CPUPlace."));
789

790 791
    auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
792 793 794 795 796 797 798 799 800
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    const Tensor* input = ctx.Input<Tensor>("Input");
    const Tensor* filter = ctx.Input<Tensor>("Filter");
    const Tensor* output_grad =
        ctx.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));

801
    PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
F
FDInSky 已提交
802 803 804
                      platform::errors::InvalidArgument(
                          "The input tensor's layout should be %d, but got %d.",
                          DataLayout::kMKLDNN, input->layout()));
A
Adam 已提交
805
    PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
806 807
                      platform::errors::InvalidArgument(
                          "Got wrong format for Input tensor."));
808

F
FDInSky 已提交
809 810 811 812 813
    PADDLE_ENFORCE_EQ(
        filter->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument(
            "The filter tensor's layout should be %d, but got %d.",
            DataLayout::kMKLDNN, filter->layout()));
A
Adam 已提交
814
    PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
F
FDInSky 已提交
815 816
                      platform::errors::InvalidArgument(
                          "Got wrong format for Filter tensor."));
817

F
FDInSky 已提交
818 819 820 821 822
    PADDLE_ENFORCE_EQ(
        output_grad->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument(
            "The output_grad tensor's layout should be %d, but got %d.",
            DataLayout::kMKLDNN, output_grad->layout()));
A
Adam 已提交
823
    PADDLE_ENFORCE_NE(output_grad->format(), MKLDNNMemoryFormat::undef,
824 825 826 827
                      "Wrong format set for output_grad tensor");

    PADDLE_ENFORCE_EQ(
        ctx.Attr<bool>("is_test"), false,
F
FDInSky 已提交
828 829
        platform::errors::InvalidArgument(
            "is_test attribute should be set to False in training phase."));
830

831 832
    if (!input_grad && !filter_grad) return;

A
Adam 已提交
833 834 835 836 837 838 839 840 841
    std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
    std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));

    std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
    std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));

    std::vector<int> dilations_temp = ctx.Attr<std::vector<int>>("dilations");
    std::vector<int64_t> dilations(begin(dilations_temp), end(dilations_temp));

842
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
A
Adam 已提交
843

844
    int groups = ctx.Attr<int>("groups");
845

846
    bool is_conv3d = strides.size() == 3U;
847 848 849 850 851 852
    const T* input_data = input->data<T>();
    const T* filter_data = filter->data<T>();
    const T* output_grad_data = output_grad->data<T>();
    T* input_grad_data = nullptr;
    T* filter_grad_data = nullptr;

853 854 855 856 857 858
    auto input_dims = input->dims();
    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
    auto filter_dims = filter->dims();
    auto filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());

A
Adam 已提交
859
    auto ksize = framework::vectorize(filter_data_dims);
860 861 862 863

    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             data_dims, strides, ksize);

A
Adam 已提交
864 865 866
    auto src_tz = paddle::framework::vectorize(input->dims());
    auto weights_tz = paddle::framework::vectorize(filter->dims());

867
    int g = std::max(groups, 1);
868
    GetWeightsTz(weights_tz, g, is_conv3d);
A
Adam 已提交
869 870
    auto dst_tz = paddle::framework::vectorize(output_grad->dims());

871
    auto src_format = input->format();
872
    MKLDNNMemoryFormat weights_format =
Y
Yihua Xu 已提交
873
        GetWeightsFormat(filter->format(), g, is_conv3d);
874

875
    // Get an unique name from "argument" name of "input" and "Filter" variable
J
Jacek Czaja 已提交
876
    // as well as attributes of primitive to be created
877
    // This name will be used as key when saving info into device context
878
    const std::string key = platform::CreateKey(
H
hong 已提交
879
        src_tz, ctx.InputName("Input") + ctx.InputName("Filter"));
880 881

    const std::string key_conv_pd = key + "@conv_pd";
882
    std::vector<primitive> pipeline;
883

884 885
    // Create user memory descriptors
    auto user_src_md = platform::MKLDNNMemDesc(
886
        {src_tz}, platform::MKLDNNGetDataType<T>(), src_format);
887
    auto user_weights_md = platform::MKLDNNMemDesc(
888
        {weights_tz}, platform::MKLDNNGetDataType<T>(), weights_format);
889 890
    auto user_diff_dst_md = platform::MKLDNNMemDesc(
        {dst_tz}, platform::MKLDNNGetDataType<T>(), output_grad->format());
891 892 893 894 895

    /* create memory descriptor for conv backward without specified format
     * ('any') which lets a primitive (conv backward in this case) choose
     * the memory format preferred for best performance
     */
896 897 898 899 900 901 902 903 904

    // TODO(jczaja): Once GRAD NHWC is working then format 'any'
    // should be used exclusively. But till forward pass enforce
    // NCHW for training we need to have NCHW here as well
    // to avoid performance degradation in relu_grad and pool2d_grad
    std::string data_format = ctx.Attr<std::string>("data_format");
    auto chosen_memory_format =
        platform::data_format_to_memory_format(data_format);

905
    weights_format = MKLDNNMemoryFormat::any;
906 907 908 909 910 911 912
    // Check the format for user's special output
    if (chosen_memory_format != MKLDNNMemoryFormat::any) {
      if (is_conv3d) {
        chosen_memory_format =
            platform::MKLDNNFormatForSize(src_tz.size(), chosen_memory_format);
      }
    }
913

914
    auto src_md = platform::MKLDNNMemDesc(
915
        src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
916
    auto diff_src_md = platform::MKLDNNMemDesc(
917
        src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
918
    auto weights_md = platform::MKLDNNMemDesc(
919
        weights_tz, platform::MKLDNNGetDataType<T>(), weights_format);
920
    auto diff_weights_md = platform::MKLDNNMemDesc(
921
        weights_tz, platform::MKLDNNGetDataType<T>(), weights_format);
922
    auto diff_dst_md = platform::MKLDNNMemDesc(
923
        dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
924
    // Retrieve conv_pd from device context
925 926 927
    auto conv_pd =
        std::static_pointer_cast<mkldnn::convolution_forward::primitive_desc>(
            dev_ctx.GetBlob(key_conv_pd));
928
    PADDLE_ENFORCE_NE(conv_pd, nullptr,
F
FDInSky 已提交
929 930
                      platform::errors::InvalidArgument(
                          "Fail to find conv_pd in device context"));
931

932 933
    auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);

934 935
    // create backward convolution weights primitive descriptor
    auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc(
A
Adam 已提交
936 937 938
        mkldnn::algorithm::convolution_direct, src_md, diff_weights_md,
        diff_dst_md, strides, mkldnn_paddings[0], mkldnn_paddings[1]);

939 940 941 942 943 944
    auto conv_bwd_weights_pd =
        std::make_shared<mkldnn::convolution_backward_weights::primitive_desc>(
            conv_bwd_weights_desc, mkldnn_engine, *conv_pd);

    // create backward convolution data primitive descriptor
    auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc(
A
Adam 已提交
945 946 947
        mkldnn::algorithm::convolution_direct, diff_src_md, weights_md,
        diff_dst_md, strides, mkldnn_paddings[0], mkldnn_paddings[1]);

948 949 950 951
    auto conv_bwd_data_pd =
        std::make_shared<mkldnn::convolution_backward_data::primitive_desc>(
            conv_bwd_data_desc, mkldnn_engine, *conv_pd);

J
Jacek Czaja 已提交
952 953 954
    platform::ConvMKLDNNHandler handler(conv_pd, conv_bwd_data_pd,
                                        conv_bwd_weights_pd, dev_ctx,
                                        mkldnn_engine, key);
955 956 957 958 959 960 961 962

    // create mkldnn memory from input tensors (data/weights)
    auto user_src_memory_p =
        handler.AcquireSrcMemory(user_src_md, to_void_cast<T>(input_data));
    auto user_weights_memory_p = handler.AcquireWeightsMemory(
        user_weights_md, to_void_cast<T>(filter_data));
    auto user_diff_dst_memory_p = handler.AcquireDiffDstMemory(
        user_diff_dst_md, to_void_cast<T>(output_grad_data));
A
Adam 已提交
963
    mkldnn::stream astream(mkldnn_engine);
964
    if (filter_grad) {
965 966
      auto src_memory_p = handler.AcquireSrcMemoryFromWeightsPrimitive(
          user_src_memory_p, pipeline);
967

968 969 970 971
      auto diff_dst_memory_4filter_p =
          handler.AcquireDiffDstMemoryFromWeightsPrimitive(
              user_diff_dst_memory_p, pipeline);

972
      const size_t size = handler.GetDiffWeightsMemorySize();
973
      filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace(), size);
974

975 976 977 978
      auto diff_weights_memory_p =
          handler.AcquireDiffWeightsMemoryFromWeightsPrimitive(
              reinterpret_cast<void*>(filter_grad_data));

A
Adam 已提交
979
      auto conv_bwd_weights_p = handler.AcquireConvolutionBackwardWeights();
980

A
Adam 已提交
981 982 983 984 985 986
      // TODO(grygielski) why no bias_diff?
      conv_bwd_weights_p->execute(
          astream, {{MKLDNN_ARG_SRC, *src_memory_p},
                    {MKLDNN_ARG_DIFF_DST, *diff_dst_memory_4filter_p},
                    {MKLDNN_ARG_DIFF_WEIGHTS, *diff_weights_memory_p}});
      astream.wait();
987

988 989
      filter_grad->set_layout(DataLayout::kMKLDNN);
      filter_grad->set_format(GetMKLDNNFormat(*diff_weights_memory_p));
990 991
    }
    if (input_grad) {
992 993 994 995 996 997 998
      auto weights_memory_p = handler.AcquireWeightsMemoryFromDataPrimitive(
          user_weights_memory_p, pipeline);

      auto diff_dst_memory_4data_p =
          handler.AcquireDiffDstMemoryFromDataPrimitive(user_diff_dst_memory_p,
                                                        pipeline);

999
      const size_t size = handler.GetDiffSourceMemorySize();
1000
      input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace(), size);
1001

1002 1003 1004
      auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive(
          reinterpret_cast<void*>(input_grad_data));

A
Adam 已提交
1005
      auto conv_bwd_data_p = handler.AcquireConvolutionBackwardData();
1006

A
Adam 已提交
1007 1008 1009 1010 1011
      conv_bwd_data_p->execute(astream,
                               {{MKLDNN_ARG_WEIGHTS, *weights_memory_p},
                                {MKLDNN_ARG_DIFF_DST, *diff_dst_memory_4data_p},
                                {MKLDNN_ARG_DIFF_SRC, *diff_src_memory_p}});
      astream.wait();
1012

1013 1014
      input_grad->set_layout(DataLayout::kMKLDNN);
      input_grad->set_format(GetMKLDNNFormat(*diff_src_memory_p));
1015
    }
X
xiaolil1 已提交
1016
  }
1017
};
1018

1019 1020 1021 1022 1023
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

X
Xin Pan 已提交
1024 1025 1026
REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN,
                                    ::paddle::platform::CPUPlace, FP32,
                                    ops::kConvMKLDNNFP32,
1027
                                    ops::ConvMKLDNNOpKernel<float, float>);
1028 1029 1030

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN,
                                    ::paddle::platform::CPUPlace, U8,
1031
                                    ops::kConvMKLDNNINT8,
1032
                                    ops::ConvMKLDNNOpKernel<uint8_t, float>);
1033 1034 1035

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv2d, MKLDNN,
                                    ::paddle::platform::CPUPlace, S8,
1036
                                    ops::kConvMKLDNNINT8,
1037
                                    ops::ConvMKLDNNOpKernel<int8_t, float>);
X
Xin Pan 已提交
1038 1039 1040 1041 1042

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv2d_grad, MKLDNN,
                                    ::paddle::platform::CPUPlace, FP32,
                                    ops::kConvMKLDNNFP32,
                                    ops::ConvMKLDNNGradOpKernel<float>);
1043 1044 1045 1046

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv3d, MKLDNN,
                                    ::paddle::platform::CPUPlace, FP32,
                                    ops::kConvMKLDNNFP32,
1047
                                    ops::ConvMKLDNNOpKernel<float, float>);
1048 1049 1050 1051 1052

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(conv3d_grad, MKLDNN,
                                    ::paddle::platform::CPUPlace, FP32,
                                    ops::kConvMKLDNNFP32,
                                    ops::ConvMKLDNNGradOpKernel<float>);