/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using framework::DataLayout;

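// Builds the oneDNN weights dims for deconvolution: Paddle stores
// conv-transpose filters as IOHW, so the first two axes are swapped to OIHW
// and, when groups > 1, expanded to GOIHW.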
inline mkldnn::memory::dims GetWeightsTz(const Tensor* filter,
                                         const int groups) {
  auto iohw_weights_tz = framework::vectorize(filter->dims());
  auto weights_tz = iohw_weights_tz;

  // IOHW -> OIHW
  weights_tz[0] = iohw_weights_tz[1];
  weights_tz[1] = iohw_weights_tz[0];
  int g = std::max(groups, 1);
  platform::GetGroupConvWeightsTz(weights_tz, g);
  return weights_tz;
}

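// Handler building the oneDNN deconvolution (conv transpose) forward
// primitive. T is the input data type, K the filter/bias data type and
// T_out the output data type (fp32 or bf16).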
template <typename T, typename K, typename T_out>
class ConvTransposeMKLDNNHandlerT
    : public platform::MKLDNNHandlerNoCachingT<T,
                                               mkldnn::deconvolution_forward> {
 public:
  ConvTransposeMKLDNNHandlerT(const framework::ExecutionContext& ctx,
                              const mkldnn::engine mkldnn_engine,
                              const Tensor* input, const Tensor* filter,
                              const Tensor* bias, Tensor* output)
      : platform::MKLDNNHandlerNoCachingT<T, mkldnn::deconvolution_forward>(
            mkldnn_engine, ctx.GetPlace()),
        is_test_(ctx.Attr<bool>("is_test")) {
    PADDLE_ENFORCE_EQ(is_test_, true,
                      platform::errors::InvalidArgument(
                          "ConvTransposeMKLDNN works only for inference. "
                          "The attribute \'is_test\' value should be set to "
                          "True, but got is_test=False."));

    PADDLE_ENFORCE_EQ(
        input->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument(
            "Got wrong layout = %d for Input tensor.", input->layout()));
    PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
                      platform::errors::InvalidArgument(
                          "Got wrong format for Input tensor. The input "
                          "format is undefined."));

    PADDLE_ENFORCE_EQ(
        filter->layout(), DataLayout::kMKLDNN,
        platform::errors::InvalidArgument(
            "The filter tensor's layout should be %d, but got %d.",
            DataLayout::kMKLDNN, filter->layout()));
    PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
                      platform::errors::InvalidArgument(
                          "Got wrong format for Filter tensor."));

    PADDLE_ENFORCE_EQ(
        input->dims().size(), 4,
        platform::errors::InvalidArgument("Input must have 4 dimensions, "
                                          "i.e. NCHW, but got %d dimensions.",
                                          input->dims().size()));
    PADDLE_ENFORCE_EQ(
        filter->dims().size(), 4,
        platform::errors::InvalidArgument("Filter must have 4 dimensions, "
                                          "i.e. OIHW, but got %d dimensions.",
                                          filter->dims().size()));

    if (bias) {
      PADDLE_ENFORCE_EQ(
          bias->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument(
              "The bias tensor's layout should be %d, but got %d.",
              DataLayout::kMKLDNN, bias->layout()));
      PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
                        platform::errors::InvalidArgument(
                            "Got wrong format for Bias tensor."));

      PADDLE_ENFORCE_EQ(
          bias->dims().size(), 1,
          platform::errors::InvalidArgument("Bias must have only 1 dimension, "
                                            "i.e. X, but got %d dimensions.",
                                            bias->dims().size()));
    }

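    // Read the conv-transpose attributes and convert them to oneDNN's
    // int64-based dims type.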
    std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
    mkldnn::memory::dims strides(begin(strides_temp), end(strides_temp));

    std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
    mkldnn::memory::dims paddings(begin(paddings_temp), end(paddings_temp));

    std::vector<int> dilations_temp = ctx.Attr<std::vector<int>>("dilations");
    mkldnn::memory::dims dilations(begin(dilations_temp), end(dilations_temp));

    int groups = ctx.Attr<int>("groups");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    PADDLE_ENFORCE_EQ(
        strides.size(), 2,
        platform::errors::Unimplemented(
            "Only 2D oneDNN convolution transpose is supported for now."));

    const auto& input_dims = input->dims();
    const auto data_dims =
        framework::slice_ddim(input_dims, 2, input_dims.size());
    const auto& filter_dims = filter->dims();
    const auto filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());

    const auto ksize = framework::vectorize(filter_data_dims);

    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             data_dims, strides, ksize);

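    // oneDNN expects dilations reduced by one (0 means no dilation).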
    std::transform(dilations.begin(), dilations.end(), dilations.begin(),
                   [](int64_t i) { return i - 1; });

    const auto src_tz = framework::vectorize(input->dims());
    const auto weights_tz = GetWeightsTz(filter, groups);
    const auto dst_tz = framework::vectorize(output->dims());
    const auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);

    /* Create memory descriptors without a specified format ('any'), which
     * lets the primitive (deconvolution in this case) choose the memory
     * format preferred for best performance.
     */
    const auto chosen_memory_format = MKLDNNMemoryFormat::any;
    const std::string fuse_activation =
        ctx.Attr<std::string>("fuse_activation");
    const float fuse_alpha = ctx.Attr<float>("fuse_alpha");
    const float fuse_beta = ctx.Attr<float>("fuse_beta");

    auto data_type = mkldnn::memory::data_type::f32;
    if (ctx.Attr<std::string>("mkldnn_data_type") == "bfloat16" ||
        std::is_same<T_out, platform::bfloat16>::value)
      data_type = mkldnn::memory::data_type::bf16;

    const auto src_md =
        platform::MKLDNNMemDesc(src_tz, data_type, chosen_memory_format);
    const auto weights_md =
        platform::MKLDNNMemDesc(weights_tz, data_type, chosen_memory_format);
    const auto dst_md = platform::MKLDNNMemDesc(
        dst_tz, platform::MKLDNNGetDataType<T_out>(), chosen_memory_format);

    const mkldnn::primitive_attr conv_trans_attr =
        CreatePostOps(fuse_activation, fuse_alpha, fuse_beta);
    auto fwd_prop_kind = is_test_ ? mkldnn::prop_kind::forward_inference
                                  : mkldnn::prop_kind::forward_training;
    if (bias) {
      std::vector<int64_t> bias_tz = framework::vectorize(bias->dims());
      const auto bias_md =
          platform::MKLDNNMemDesc(bias_tz, data_type, MKLDNNMemoryFormat::x);
      this->AcquireForwardPrimitiveDescriptor(
          conv_trans_attr, fwd_prop_kind, dnnl::algorithm::deconvolution_direct,
          src_md, weights_md, bias_md, dst_md, strides, dilations,
          mkldnn_paddings[0], mkldnn_paddings[1]);
    } else {
      this->AcquireForwardPrimitiveDescriptor(
          conv_trans_attr, fwd_prop_kind, dnnl::algorithm::deconvolution_direct,
          src_md, weights_md, dst_md, strides, dilations, mkldnn_paddings[0],
          mkldnn_paddings[1]);
    }
  }

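  // Translates the fuse_activation attribute into oneDNN eltwise post-ops
  // (relu / bounded relu / swish) attached to the deconvolution primitive.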
  mkldnn::primitive_attr CreatePostOps(const std::string& fuse_activation,
                                       const float& fuse_alpha,
                                       const float& fuse_beta) {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;

    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }

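  // Wraps the input tensor in its user format and, if that differs from the
  // format chosen by the primitive descriptor, reorders it.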
  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryWithReorder(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    auto user_src_md = platform::MKLDNNMemDesc(
        framework::vectorize(input->dims()), platform::MKLDNNGetDataType<T>(),
        input->format());
    return platform::MKLDNNHandlerNoCachingT<T, mkldnn::deconvolution_forward>::
        AcquireMemoryWithReorder(user_src_md, this->fwd_pd_->src_desc(),
                                 platform::to_void_cast<T>(input_data));
  }

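  // Acquires the weights memory: the IOHW filter data is first rearranged to
  // OIHW on the host, then reordered to the format preferred by the primitive.
  // The result is cached in the device context under `key`.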
  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryWithReorder(
      const platform::MKLDNNDeviceContext& dev_ctx, const std::string& key,
      const framework::Tensor* filter, const int& groups) {
    const K* filter_data = filter->data<K>();
    auto weights_tz = GetWeightsTz(filter, groups);
    int g = std::max(groups, 1);

    auto user_src_md = platform::MKLDNNMemDesc(
        weights_tz, platform::MKLDNNGetDataType<K>(),
        (g == 1) ? filter->format() : MKLDNNMemoryFormat::goihw);

    auto iohw_weights_tz = framework::vectorize(filter->dims());
    // Custom Reorder from IOHW to OIHW
    auto iohw2oihw_reorder =
        [&iohw_weights_tz](const K* filter_data) -> std::shared_ptr<K> {
      int o = iohw_weights_tz[1];
      int c = iohw_weights_tz[0];
      int h = iohw_weights_tz[2];
      int w = iohw_weights_tz[3];
      std::shared_ptr<K> reordered_filter_data(new K[o * c * h * w](),
                                               std::default_delete<K[]>());
      for (int i = 0; i < c; ++i) {
        for (int j = 0; j < o; ++j) {
          int in_offset = j * h * w + i * o * h * w;
          int out_offset = j * c * h * w + i * h * w;
          std::memcpy(&(reordered_filter_data.get())[out_offset],
                      &filter_data[in_offset], h * w * sizeof(K));
        }
      }

      return reordered_filter_data;
    };

    return this->template AcquireMemoryWithReorder<K>(
        dev_ctx, user_src_md, this->fwd_pd_->weights_desc(),
        platform::to_void_cast<K>(filter_data), key, "@weights_mem_p", is_test_,
        iohw2oihw_reorder);
  }

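  // Caching counterpart of the base class' AcquireMemoryWithReorder: the user
  // and target memories plus the reorder primitive are stored as blobs in the
  // device context, so persistent memories (weights, bias) are reordered only
  // once across executions.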
  template <typename F = T>
  std::shared_ptr<mkldnn::memory> AcquireMemoryWithReorder(
      const platform::MKLDNNDeviceContext& dev_ctx,
      const mkldnn::memory::desc& user_md,
      const mkldnn::memory::desc& target_md, void* ptr, const std::string& key,
      const std::string& suffix, bool is_persistent = false,
      std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {},
      const std::vector<float>& scale_data = {1.0f}, int mask = 0) {
    const auto target_key = key + suffix + "_target";
    const auto key_reorder_p = key + suffix + "reorder_p";
    const auto user_key = key + suffix + "_user";

    auto target_memory_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx.GetBlob(target_key));

    if (target_memory_p == nullptr) {
      if (custom_reorder_func) {
        auto reordered_data =
            custom_reorder_func(reinterpret_cast<const F*>(ptr));
        dev_ctx.SetBlob(key_reorder_p + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }
      auto user_memory_p =
          std::make_shared<dnnl::memory>(user_md, this->engine_, ptr);
      if (user_md != target_md) {
        target_memory_p =
            std::make_shared<mkldnn::memory>(target_md, this->engine_);
        dnnl::reorder::primitive_desc reorder_pdesc;
        if (platform::is_int8<T>()) {
          dnnl::primitive_attr attr;
          attr.set_output_scales(mask, scale_data);
          reorder_pdesc = dnnl::reorder::primitive_desc(*user_memory_p,
                                                        *target_memory_p, attr);
        } else {
          reorder_pdesc =
              dnnl::reorder::primitive_desc(*user_memory_p, *target_memory_p);
        }
        auto reorder_p = std::make_shared<dnnl::reorder>(reorder_pdesc);
        dev_ctx.SetBlob(key_reorder_p, reorder_p);

        auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      } else {
        target_memory_p = user_memory_p;
      }
      dev_ctx.SetBlob(user_key, user_memory_p);
      dev_ctx.SetBlob(target_key, target_memory_p);
    } else if (!is_persistent) {
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

      auto user_memory_p =
          std::static_pointer_cast<dnnl::memory>(dev_ctx.GetBlob(user_key));
      user_memory_p->set_data_handle(ptr);

      // TODO(jczaja): A cached reorder primitive means a reorder is needed
      // here; this detection should be reworked to stop relying on keys.
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

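  // Bias is a 1-D tensor (format x); it is reordered and cached like the
  // weights.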
  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryWithReorder(
      const platform::MKLDNNDeviceContext& dev_ctx, const std::string& key,
      const framework::Tensor* bias) {
    const K* bias_data = bias->data<K>();
    auto user_bias_md = platform::MKLDNNMemDesc(
        framework::vectorize(bias->dims()), platform::MKLDNNGetDataType<K>(),
        MKLDNNMemoryFormat::x);
    return this->AcquireMemoryWithReorder(
        dev_ctx, user_bias_md, this->fwd_pd_->bias_desc(),
        platform::to_void_cast<K>(bias_data), key, "@bias_mem_p", is_test_);
  }

 private:
  const bool is_test_;
};

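// CPU kernel for conv2d_transpose. Dispatches Execute<T_out> with T_out being
// float or bfloat16, depending on the mkldnn_data_type and force_fp32_output
// attributes.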
template <typename T, typename K>
class ConvTransposeMKLDNNOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Operator DNNL ConvTranspose must use CPUPlace"));
    const bool is_bfloat16 =
        ctx.Attr<std::string>("mkldnn_data_type") == "bfloat16";
    const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
    if (is_bfloat16) {
      if (force_fp32_output)
        Execute<float>(ctx);
      else
        Execute<platform::bfloat16>(ctx);
    } else {
      Execute<float>(ctx);
    }
  }

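  // Builds the handler, acquires (reordering where needed) the src, weights,
  // bias and dst memories, then executes the deconvolution primitive on the
  // default oneDNN stream.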
  template <typename T_out>
  void Execute(const framework::ExecutionContext& ctx) const {
    auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    const auto* input = ctx.Input<Tensor>("Input");
    const auto* filter = ctx.Input<Tensor>("Filter");
    const auto* bias =
        ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;
    auto* output = ctx.Output<Tensor>("Output");
    ConvTransposeMKLDNNHandlerT<T, K, T_out> handler(ctx, mkldnn_engine, input,
                                                     filter, bias, output);
    auto src_memory_p = handler.AcquireSrcMemoryWithReorder(input);
    // A caching key is needed for the weights memory, which is cached
    // in the device context across executions.
    std::string key = platform::CreateKey(dev_ctx, ctx.InputName("Input"),
                                          ctx.InputName("Filter"),
                                          (bias ? ctx.InputName("Bias") : ""));
    key = platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);
    auto weights_memory_p = handler.AcquireWeightsMemoryWithReorder(
        dev_ctx, key, filter, ctx.Attr<int>("groups"));

    std::shared_ptr<dnnl::memory> dst_memory_p =
        handler.template AcquireDstMemory<T_out>(output);
    auto conv_p = handler.AcquireForwardPrimitive();

    std::unordered_map<int, dnnl::memory> args = {
        {MKLDNN_ARG_SRC, *src_memory_p},
        {MKLDNN_ARG_WEIGHTS, *weights_memory_p},
        {MKLDNN_ARG_DST, *dst_memory_p}};

    if (bias) {
      auto bias_memory_p =
          handler.AcquireBiasMemoryWithReorder(dev_ctx, key, bias);
      args.insert({MKLDNN_ARG_BIAS, *bias_memory_p});
    }
    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    conv_p->execute(astream, args);
    astream.wait();
    output->set_layout(DataLayout::kMKLDNN);
    output->set_format(platform::GetMKLDNNFormat(*dst_memory_p));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

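// Registered for fp32 input (T = float, K = float) and bf16 input with fp32
// filter/bias (T = bfloat16, K = float).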
REGISTER_OP_KERNEL(
    conv2d_transpose, MKLDNN, ::paddle::platform::CPUPlace,
    ops::ConvTransposeMKLDNNOpKernel<float, float>,
    ops::ConvTransposeMKLDNNOpKernel<paddle::platform::bfloat16, float>);