/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/backends/onednn/onednn_reuse.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = dnnl::memory;

template <typename T,
          typename TForward,
          typename TBackward = mkldnn_dummy_primitive,
          typename TBackward_params = mkldnn_dummy_primitive>
using MKLDNNHandlerT =
    phi::funcs::OneDNNHandlerT<T, TForward, TBackward, TBackward_params>;

template <typename T,
          typename TForward,
          typename TBackward = mkldnn_dummy_primitive,
          typename TBackward_params = mkldnn_dummy_primitive>
using MKLDNNHandlerNoCachingT = phi::funcs::
    OneDNNHandlerNoCachingT<T, TForward, TBackward, TBackward_params>;

template <typename T>
using ReductionMKLDNNHandler = phi::funcs::ReductionOneDNNHandler<T>;

template <typename T>
using BroadcastDataMKLDNNHandler = phi::funcs::BroadcastDataOneDNNHandler<T>;

template <typename T>
using BinaryMKLDNNHandler = phi::funcs::BinaryOneDNNHandler<T>;

static void AppendActivation(const framework::ExecutionContext& ctx,
                             dnnl::post_ops& post_ops,  // NOLINT
                             float activation_scale = 1.0f) {
  const auto invalid_attribute =
      ctx.HasAttr("fuse_activation")
          ? ctx.Attr<std::string>("fuse_activation").empty()
          : true;
  if (invalid_attribute) return;

  const auto fuse_activation = ctx.Attr<std::string>("fuse_activation");
  const auto fuse_alpha =
      ctx.HasAttr("fuse_alpha") ? ctx.Attr<float>("fuse_alpha") : 0.0f;
  const auto fuse_beta =
      ctx.HasAttr("fuse_beta") ? ctx.Attr<float>("fuse_beta") : 0.0f;

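  // hard_sigmoid has no direct oneDNN counterpart, so it is expressed as a
  // linear post-op (fuse_alpha * x + fuse_beta) followed by a clip to [0, 1].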
  if (fuse_activation == "hard_sigmoid") {
    post_ops.append_eltwise(activation_scale,
                            dnnl::algorithm::eltwise_linear,
                            fuse_alpha,
                            fuse_beta);
    post_ops.append_eltwise(
        activation_scale, dnnl::algorithm::eltwise_clip, 0.0f, 1.0f);
  } else {
    const std::unordered_map<std::string, dnnl::algorithm> activation_map = {
        {"abs", dnnl::algorithm::eltwise_abs},
        {"clip", dnnl::algorithm::eltwise_clip},
        {"gelu", dnnl::algorithm::eltwise_gelu_erf},
        {"gelu_erf", dnnl::algorithm::eltwise_gelu_erf},
        {"gelu_tanh", dnnl::algorithm::eltwise_gelu_tanh},
        {"hard_swish", dnnl::algorithm::eltwise_hardswish},
        {"leaky_relu", dnnl::algorithm::eltwise_relu},
        {"mish", dnnl::algorithm::eltwise_mish},
        {"relu", dnnl::algorithm::eltwise_relu},
        {"relu6", dnnl::algorithm::eltwise_bounded_relu},
        {"sigmoid", dnnl::algorithm::eltwise_logistic},
        {"sqrt", dnnl::algorithm::eltwise_sqrt},
        {"swish", dnnl::algorithm::eltwise_swish},
        {"tanh", dnnl::algorithm::eltwise_tanh}};

    const auto& activation_type = activation_map.find(fuse_activation);

    PADDLE_ENFORCE_NE(
        activation_type,
        activation_map.end(),
        platform::errors::InvalidArgument(
            "Activation '%s' not found in oneDNN algorithms mapper",
            fuse_activation));

    post_ops.append_eltwise(
        activation_scale, activation_type->second, fuse_alpha, fuse_beta);
  }
}
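
// Illustrative use of AppendActivation (a sketch; the execution context and
// attribute values are assumed to come from the calling kernel, they are not
// defined in this file):
//
//   dnnl::post_ops post_ops;
//   AppendActivation(ctx, post_ops);  // e.g. fuse_activation == "relu"
//   dnnl::primitive_attr attrs;
//   attrs.set_post_ops(post_ops);
//   // attrs is then passed when creating the forward primitive descriptor.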

template <typename T>
constexpr bool IsInt8() {
  return std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
}

template <typename T>
constexpr bool IsBfloat16() {
  return std::is_same<T, paddle::platform::bfloat16>::value;
}

template <typename XT, typename YT, typename OT>
class MatMulV2MKLDNNHandler
    : public paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul> {
 public:
  MatMulV2MKLDNNHandler(const framework::ExecutionContext& ctx,
                        const dnnl::engine engine,
                        paddle::platform::Place cpu_place,
                        const std::vector<int64_t>& x_org_dims,
                        bool trans_x,
                        const std::vector<int64_t>& y_org_dims,
                        bool trans_y,
                        bool is_output_fused,
                        const std::vector<int64_t>& x_strides_override,
                        const std::vector<int64_t>& y_strides_override)
      : paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul>(engine,
                                                                    cpu_place) {
    // M X K * K X N
    std::vector<int64_t> x_dims(x_org_dims);
    std::vector<int64_t> y_dims(y_org_dims);

    const int MB_idx = x_dims.size() - 3;
    const int H_idx = x_dims.size() - 2;
    const int W_idx = x_dims.size() - 1;

    if (trans_x) std::swap(x_dims[H_idx], x_dims[W_idx]);
    if (trans_y) std::swap(y_dims[H_idx], y_dims[W_idx]);

    const memory::dim M = x_dims[H_idx];
    const memory::dim K = x_dims[W_idx];
    const memory::dim N = y_dims[W_idx];

    std::vector<int64_t> x_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> y_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> out_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> out_ddims(x_dims.size() - 3, 1);

    x_strides.reserve(x_dims.size());
    y_strides.reserve(x_dims.size());
    out_strides.reserve(x_dims.size());

    if (!x_strides_override.empty()) {
      x_strides = x_strides_override;
    } else {
      if (!trans_x) {
        x_strides.insert(x_strides.end(), {M * K, K, 1});
      } else {
        x_strides.insert(x_strides.end(), {M * K, 1, M});
      }
    }

    if (!y_strides_override.empty()) {
      y_strides = y_strides_override;
    } else {
      if (!trans_y) {
        y_strides.insert(y_strides.end(), {N * K, N, 1});
      } else {
        y_strides.insert(y_strides.end(), {N * K, 1, K});
      }
    }

    out_strides.insert(out_strides.end(), {M * N, N, 1});
    out_ddims.insert(out_ddims.end(),
                     {std::max(x_dims[MB_idx], y_dims[MB_idx]), M, N});

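    // Fill in the leading (batch) dimensions from innermost to outermost:
    // the output shape broadcasts x and y per dimension, and each stride is
    // the product of all dimensions inside it.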
    for (int i = x_dims.size() - 4; i >= 0; --i) {
      out_ddims[i] = std::max(x_dims[i], y_dims[i]);
      if (x_strides_override.empty()) {
        x_strides[i] = x_dims[i + 1] * x_strides[i + 1];
      }
      if (y_strides_override.empty()) {
        y_strides[i] = y_dims[i + 1] * y_strides[i + 1];
      }
      out_strides[i] = out_ddims[i + 1] * out_strides[i + 1];
    }

    if (!IsInt8<OT>() && !IsBfloat16<OT>() && is_output_fused) {
      out_strides = FakeTransposeStrides(out_ddims);
    }

    auto x_md = memory::desc(x_dims, MKLDNNGetDataType<XT>(), x_strides);
    auto y_md = memory::desc(y_dims, MKLDNNGetDataType<YT>(), y_strides);
    auto out_md = memory::desc(out_ddims, MKLDNNGetDataType<OT>(), out_strides);

    const dnnl::primitive_attr matmul_attrs = CreateMatmulAttrs(ctx);

    this->AcquireForwardPrimitiveDescriptor(matmul_attrs, x_md, y_md, out_md);
  }
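
  // Worked example (illustrative shapes): for x_org_dims = {2, 3, 4} with
  // trans_x == false and no stride override, x_strides becomes {12, 4, 1},
  // i.e. a dense row-major [2, 3, 4] tensor; with trans_x == true the last
  // two dims swap to [2, 4, 3] and x_strides becomes {12, 1, 4}.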

  float ComputeOutputScale(const framework::ExecutionContext& ctx) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;
    if (ctx.HasAttr("Scale_x") && ctx.HasAttr("Scale_y") &&
        ctx.HasAttr("Scale_out")) {
      float scale_x = ctx.Attr<float>("Scale_x");
      float scale_y = ctx.Attr<float>("Scale_y");
      bool force_fp32_out = ctx.HasAttr("force_fp32_output")
                                ? ctx.Attr<bool>("force_fp32_output")
                                : false;
      float scale_out = force_fp32_out ? 1.f : ctx.Attr<float>("Scale_out");
      alpha *= scale_out / (scale_x * scale_y);
    }
    return alpha;
  }
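
  // Worked example (illustrative values): with alpha = 1.0, Scale_x = 0.5,
  // Scale_y = 0.25, Scale_out = 2.0 and force_fp32_output = false, the
  // combined output scale is 1.0 * 2.0 / (0.5 * 0.25) = 16.0, later applied
  // via set_output_scales in CreateMatmulAttrs.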

  dnnl::primitive_attr CreateMatmulAttrs(
      const framework::ExecutionContext& ctx) {
    dnnl::primitive_attr matmul_attrs;
    dnnl::post_ops post_operations;

    float scale_out = ComputeOutputScale(ctx);
    if (scale_out != 1.0f) {
      matmul_attrs.set_output_scales(0, {scale_out});
    }

    if (ctx.HasInput("ResidualData")) {
      auto* residual_data = ctx.Input<Tensor>("ResidualData");
      auto residual_data_tz = phi::vectorize(residual_data->dims());
      auto residual_data_md = memory::desc(residual_data_tz,
                                           MKLDNNGetDataType<OT>(),
                                           dnnl::memory::format_tag::any);
      post_operations.append_binary(dnnl::algorithm::binary_add,
                                    residual_data_md);
      if (ctx.HasAttr("Scale_in_eltwise")) {
        float sum_scale = scale_out / ctx.Attr<float>("Scale_in_eltwise");
        post_operations.append_sum(sum_scale);
      }
    }

    AppendActivation(ctx, post_operations);

    matmul_attrs.set_post_ops(post_operations);
    return matmul_attrs;
  }

  std::vector<int64_t> FakeTransposeStrides(
      const std::vector<int64_t>& matmul_out_dims) const {
    // The matmul_v2 + transpose + reshape fusion guarantees that the output
    // is 4D and that the transpose axes are {0, 2, 1, 3}
    std::vector<int64_t> transpose_axis = {0, 2, 1, 3};
    std::vector<int64_t> fake_strides(transpose_axis.size());
    int ndims = static_cast<int>(transpose_axis.size());

    int total_stride = 1;

    for (int i = ndims - 1; i >= 0; --i) {
      fake_strides[transpose_axis[i]] = total_stride;
      total_stride *= matmul_out_dims[transpose_axis[i]];
    }

    return fake_strides;
  }
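
  // Worked example (illustrative shape): for matmul_out_dims = {2, 12, 128,
  // 64} the returned strides are {98304, 64, 768, 1}; the buffer is laid out
  // as if the output had already been transposed to [2, 128, 12, 64]
  // contiguously.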

  std::shared_ptr<memory> AcquireWeightsMemory(const Tensor* input) {
    const YT* input_data = input->data<YT>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->weights_desc(),
                                            to_void_cast<YT>(input_data));
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      paddle::framework::Tensor* output) {
    // We cannot use the base AcquireDstMemory, because it requests an
    // allocation based on the DST memory primitive size. That is fine in
    // general, but this MatMul primitive covers only a single batch of data,
    // and the pointer is shifted for every new batch, so the Tensor is larger
    // than the dst memory primitive. We would therefore request less memory
    // than is actually used and trigger an assertion. Since there is no 'any'
    // format here, we can keep the default Tensor size as computed in
    // ComputeInferShape.
    OT* ptr = output->mutable_data<OT>(this->place_);
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->dst_desc(), ptr);
  }
};
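
// Illustrative kernel flow (a sketch; `x`, `y`, `out` and the remaining
// constructor arguments are assumed to come from the op kernel):
//
//   MatMulV2MKLDNNHandler<T, T, T> handler(ctx, engine, cpu_place, x_dims,
//                                          trans_x, y_dims, trans_y, false,
//                                          {}, {});
//   auto src_memory_p = handler.AcquireSrcMemory(x);
//   auto weights_memory_p = handler.AcquireWeightsMemory(y);
//   auto dst_memory_p = handler.AcquireDstMemory(out);
//   auto matmul_p = handler.AcquireForwardPrimitive();
//
//   std::unordered_map<int, dnnl::memory> args = {
//       {DNNL_ARG_SRC, *src_memory_p},
//       {DNNL_ARG_WEIGHTS, *weights_memory_p},
//       {DNNL_ARG_DST, *dst_memory_p}};
//   auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
//   matmul_p->execute(astream, args);
//   astream.wait();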

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerNoCachingT<T,
                                     dnnl::eltwise_forward,
                                     dnnl::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(dnnl::algorithm algorithm,
                          const framework::ExecutionContext& ctx,
                          const dnnl::engine engine,
                          Place cpu_place,
                          const framework::Tensor* x)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::eltwise_forward,
                                          dnnl::eltwise_backward>(engine,
                                                                  cpu_place) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
    float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;

    if (ctx.Type() == "scale") {
      bool bias_after_scale = ctx.Attr<bool>("bias_after_scale");
      auto* scale_tensor = ctx.Input<Tensor>("ScaleTensor");
      alpha = (scale_tensor == nullptr)
                  ? ctx.Attr<float>("scale")
                  : static_cast<float>(*(scale_tensor->data<T>()));
      beta = ctx.Attr<float>("bias");
      // if bias_after_scale == true
      //   out = scale*X + bias
      // else
      //   out = scale*(X + bias) = scale*X + scale*bias
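      // e.g. (illustrative) scale = 2, bias = 3, bias_after_scale = false:
      //   out = 2*(X + 3) = 2*X + 6, so beta is folded to 6 below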
      if (!bias_after_scale) {
        beta *= alpha;
      }
    } else if (ctx.Type() == "clip") {
      alpha = ctx.HasInput("Min") ? ctx.Input<Tensor>("Min")->data<float>()[0]
                                  : ctx.Attr<float>("min");
      beta = ctx.HasInput("Max") ? ctx.Input<Tensor>("Max")->data<float>()[0]
                                 : ctx.Attr<float>("max");
    } else {
      // Paddle uses beta for swish, but oneDNN uses alpha
      if (algorithm == dnnl::algorithm::eltwise_swish) {
        std::swap(alpha, beta);
      } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
        alpha = ctx.Attr<float>("threshold");
      }
    }

    this->AcquireForwardPrimitiveDescriptor(dnnl::prop_kind::forward_training,
                                            algorithm,
                                            x->mem_desc(),
                                            alpha,
                                            beta);
  }

  ActivationMKLDNNHandler(dnnl::algorithm algorithm,
                          const framework::ExecutionContext& ctx,
                          const dnnl::engine engine,
                          Place cpu_place,
                          const framework::Tensor* x,
                          const Tensor* dout)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::eltwise_forward,
                                          dnnl::eltwise_backward>(engine,
                                                                  cpu_place) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
    float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;

    // Paddle uses beta for swish, but oneDNN uses alpha
    if (algorithm == dnnl::algorithm::eltwise_swish) {
      std::swap(alpha, beta);
    } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
      alpha = ctx.Attr<float>("threshold");
    }

    if (ctx.Type() == "clip_grad") {
      alpha = ctx.HasInput("Min") ? ctx.Input<Tensor>("Min")->data<float>()[0]
                                  : ctx.Attr<float>("min");
      beta = ctx.HasInput("Max") ? ctx.Input<Tensor>("Max")->data<float>()[0]
                                 : ctx.Attr<float>("max");
    }

    this->AcquireForwardPrimitiveDescriptor(dnnl::prop_kind::forward_training,
                                            algorithm,
                                            x->mem_desc(),
                                            alpha,
                                            beta);
    this->AcquireBackwardPrimitiveDescriptor(
        algorithm, dout->mem_desc(), x->mem_desc(), alpha, beta);
  }

  std::shared_ptr<dnnl::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data));
  }
};
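
// Illustrative forward flow (a sketch; `x`, `out` and `astream` are assumed
// to come from the op kernel):
//
//   ActivationMKLDNNHandler<T> handler(algorithm, ctx, engine, place, x);
//   auto src_memory_p = handler.AcquireSrcMemory(x);
//   auto dst_memory_p = handler.AcquireDstMemory(out);
//   auto activation_p = handler.AcquireForwardPrimitive();
//   activation_p->execute(astream, {{DNNL_ARG_SRC, *src_memory_p},
//                                   {DNNL_ARG_DST, *dst_memory_p}});
//   astream.wait();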

static std::unordered_map<std::string, std::string> GetAttributeMap(
    std::string act_type) {
  std::unordered_map<std::string, std::string> attr_map;
  if (act_type == "swish") {
    attr_map.emplace("beta", "fuse_alpha");
  } else if (act_type == "relu6") {
    attr_map.emplace("threshold", "fuse_alpha");
  } else if (act_type == "hard_sigmoid") {
    attr_map.emplace("slope", "fuse_alpha");
    attr_map.emplace("offset", "fuse_beta");
  } else if (act_type == "clip") {
    attr_map.emplace("min", "fuse_alpha");
    attr_map.emplace("max", "fuse_beta");
  } else {
    attr_map.emplace("alpha", "fuse_alpha");
    attr_map.emplace("beta", "fuse_beta");
  }
  return attr_map;
}
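
// For example, GetAttributeMap("swish") returns {"beta" -> "fuse_alpha"}, so
// a fuse pass can copy the activation op's "beta" attribute into the fused
// op's "fuse_alpha" attribute, which AppendActivation later reads.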

static std::vector<std::string> GetSupportedActivations() {
  return std::vector<std::string>{"abs",
                                  "clip",
                                  "gelu",
                                  "hard_sigmoid",
                                  "hard_swish",
                                  "leaky_relu",
                                  "mish",
                                  "relu",
                                  "relu6",
                                  "sigmoid",
                                  "sqrt",
                                  "swish",
                                  "tanh"};
}

class ReorderMKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       dnnl::memory::data_type dtype,
                       dnnl::engine engine)
      : dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype),
        dtype_(dtype),
        dtype_dst_(dtype),
        engine_(engine) {}

  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       dnnl::memory::data_type dtype,
                       framework::proto::VarType::Type vtype_dst,
                       dnnl::memory::data_type dtype_dst,
                       dnnl::engine engine)
      : dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype_dst),
        dtype_(dtype),
        dtype_dst_(dtype_dst),
        engine_(engine) {}

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const dnnl::memory::desc& md,
                                                 void* ptr) {
    return std::make_shared<dnnl::memory>(md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
                                                 void* ptr) {
    auto md = dnnl::memory::desc(dims_, dtype_, fmt);
    return std::make_shared<dnnl::memory>(md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireSubmemory(
      const std::vector<int64_t>& dims,
      const std::vector<int64_t>& offset,
      const std::shared_ptr<dnnl::memory>& mem_p) {
    auto sub_md = mem_p->get_desc().submemory_desc(dims, {offset});
    auto sub_mem_p = std::make_shared<dnnl::memory>(
        sub_md, engine_, mem_p->get_data_handle());
    return sub_mem_p;
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
                                                 const MKLDNNMemoryFormat& fmt,
                                                 platform::Place place) {
    auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_dst_, fmt);
    auto dst_data = output->mutable_data(
        place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      framework::Tensor* output,
      const dnnl::memory::desc& src_md,
      platform::Place place) {
    if (vtype_dst_ == vtype_) {
      auto dst_data = output->mutable_data(
          place, framework::TransToPhiDataType(vtype_dst_), src_md.get_size());
      return std::make_shared<dnnl::memory>(src_md, engine_, dst_data);
    } else {
      auto dst_md = src_md;
      dst_md.data.data_type = static_cast<dnnl_data_type_t>(dtype_dst_);
      auto dst_data = output->mutable_data(
          place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
      return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
    }
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      framework::Tensor* output,
      const std::vector<int64_t>& dims,
      const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto dst_md = platform::MKLDNNMemDesc(dims, dtype_dst_, fmt);
    auto dst_data = output->mutable_data(
        place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
  }

  std::shared_ptr<dnnl::reorder> AcquireReorder(
      std::shared_ptr<dnnl::memory> dst_memory_p,
      std::shared_ptr<dnnl::memory> src_memory_p) {
    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
  }

  std::shared_ptr<dnnl::reorder> AcquireReorder(
      std::shared_ptr<dnnl::memory> dst_memory_p,
      std::shared_ptr<dnnl::memory> src_memory_p,
      const dnnl::primitive_attr& attrs) {
    return std::make_shared<dnnl::reorder>(
        *(src_memory_p), *(dst_memory_p), attrs);
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_, vtype_dst_;
  dnnl::memory::data_type dtype_, dtype_dst_;
  dnnl::engine engine_;
};
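
// Illustrative reorder flow (a sketch; `input`, `output`, `in_md`, `out_fmt`,
// `place` and `astream` are assumed to come from the calling kernel):
//
//   ReorderMKLDNNHandler handler(dims, vtype, dtype, engine);
//   auto src_memory_p =
//       handler.AcquireSrcMemory(in_md, to_void_cast<T>(input->data<T>()));
//   auto dst_memory_p = handler.AcquireDstMemory(output, out_fmt, place);
//   auto reorder_p = handler.AcquireReorder(dst_memory_p, src_memory_p);
//   reorder_p->execute(astream, *src_memory_p, *dst_memory_p);
//   astream.wait();
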
}  // namespace platform
}  // namespace paddle