/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/backends/onednn/onednn_reuse.h"

namespace paddle {
namespace operators {

using framework::ExecutionContext;
using phi::OneDNNContext;
using phi::funcs::OneDNNGetDataType;
using phi::funcs::to_void_cast;

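// Holds the oneDNN inner_product primitive together with its memory objects
// so they can be cached in the device context and reused across executions.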
struct InnerProductCache {
  dnnl::inner_product_forward inner_product_p;
  dnnl::memory src_mem;
  dnnl::memory weights_mem;
  dnnl::memory bias_mem;
  dnnl::memory dst_mem;
};
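
// FCMKLDNNHandler maps the FC op onto oneDNN's inner_product primitive: the
// input is flattened to a 2-D [MB, IC] matrix, the weights to [OC, IC], and
// the optional bias, activation, residual sum and output scaling are
// expressed as primitive attributes / post-ops.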
template <typename T_in, typename T_w, typename T_out>
class FCMKLDNNHandler
    : public phi::funcs::OneDNNHandlerNoCachingT<T_in,
                                                 dnnl::inner_product_forward> {
 public:
  FCMKLDNNHandler(const ExecutionContext& ctx,
                  const OneDNNContext& dev_ctx,
                  const phi::DenseTensor* x,
                  const phi::DenseTensor* weights,
                  const phi::DenseTensor* bias,
                  phi::DenseTensor* out UNUSED,
                  const int in_num_col_dims,
                  dnnl::engine onednn_engine,
                  platform::Place cpu_place)
      : phi::funcs::OneDNNHandlerNoCachingT<T_in, dnnl::inner_product_forward>(
            onednn_engine, cpu_place),
        dev_ctx_(dev_ctx) {
    this->memory_key_ = ctx.InputName("W");

    auto x_vec_dims = phi::vectorize(x->dims());
    auto weights_vec_dims = phi::vectorize(weights->dims());

    int MB = 1;
    for (int i = 0; i < in_num_col_dims; ++i) {
      MB *= x_vec_dims[i];
    }

    int IC = 1;
    for (size_t i = in_num_col_dims; i < x_vec_dims.size(); ++i) {
      IC *= x_vec_dims[i];
    }

    int OC = weights_vec_dims[1];

    dnnl::memory::desc bias_md;

    auto src_md = dnnl::memory::desc(
        {MB, IC}, OneDNNGetDataType<T_in>(), dnnl::memory::format_tag::any);
    auto weights_md = dnnl::memory::desc(
        {OC, IC}, OneDNNGetDataType<T_w>(), dnnl::memory::format_tag::any);
    auto dst_md = dnnl::memory::desc(
        {MB, OC}, OneDNNGetDataType<T_out>(), dnnl::memory::format_tag::any);
    if (bias) {
      bias_md = dnnl::memory::desc({bias->numel()},
                                   OneDNNGetDataType<float>(),
                                   dnnl::memory::format_tag::a);
    }

    const auto attrs = CreateFCAttrs(ctx);

    this->AcquireForwardPrimitiveDescriptor(attrs,
                                            dnnl::prop_kind::forward_inference,
                                            src_md,
                                            weights_md,
                                            bias_md,
                                            dst_md);
  }

 private:
  dnnl::primitive_attr CreateFCAttrs(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;

    float sum_scale = 1.0f;
    float activation_scale = 1.0f;
    if (phi::funcs::is_int8<T_w>()) {
      std::vector<float> output_shift_scale;
      std::tie(output_shift_scale, sum_scale, activation_scale) =
          GetOutputScales(ctx);
      int mask = CreateMask(1, output_shift_scale.size() > 1);
      attributes.set_output_scales(mask, output_shift_scale);
    }

    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      post_operations.append_sum(sum_scale);
    }

    // ReLU from "fc_fuse_pass"
    if (ctx.Attr<std::string>("activation_type") == "relu") {
      post_operations.append_eltwise(
          activation_scale, dnnl::algorithm::eltwise_relu, 0.0f, 0.0f);
    }
    AppendActivation(ctx, post_operations, activation_scale);

    if (ctx.HasAttr("fused_output_scale")) {
      float scale_alpha = ctx.Attr<float>("fused_output_scale");
      post_operations.append_eltwise(
          1.0, dnnl::algorithm::eltwise_linear, scale_alpha, 0.0f);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  // Compute the bias scales so that their values correspond to the
  // scale of the data produced by the input and weights multiplication
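  // Illustrative example: with Scale_in = 127.0f and Scale_weights[i] = 63.5f,
  // the fp32 bias value is multiplied by 127.0 * 63.5 = 8064.5 during its
  // reorder so that it lands in the same domain as the int32 accumulator.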
  std::vector<float> GetBiasScales(const ExecutionContext& ctx) {
    if (ctx.HasAttr("Bias_scales")) {
      return ctx.Attr<std::vector<float>>("Bias_scales");
    } else {
      const float scale_in = ctx.Attr<float>("Scale_in");
      const auto& scale_weights = ctx.Attr<std::vector<float>>("Scale_weights");
      std::vector<float> bias_scales(scale_weights.size());

      for (size_t i = 0; i < bias_scales.size(); ++i) {
        if (scale_weights[i] == 0.0)
          bias_scales[i] = 1.0f;
        else
          bias_scales[i] = scale_in * scale_weights[i];
      }
      return bias_scales;
    }
  }

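  // Append the activation requested via the "fuse_activation"/"fuse_alpha"/
  // "fuse_beta" attributes as an eltwise post-op; the attribute string is
  // translated to a dnnl algorithm through OneDNNActivationMap().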
  void AppendActivation(const ExecutionContext& ctx,
                        dnnl::post_ops& post_ops,  // NOLINT
                        float activation_scale = 1.0f) {
    const auto invalid_attribute =
        ctx.HasAttr("fuse_activation")
            ? ctx.Attr<std::string>("fuse_activation").empty()
            : true;
    if (invalid_attribute) return;

    const auto fuse_activation = ctx.Attr<std::string>("fuse_activation");
    const auto fuse_alpha =
        ctx.HasAttr("fuse_alpha") ? ctx.Attr<float>("fuse_alpha") : 0.0f;
    const auto fuse_beta =
        ctx.HasAttr("fuse_beta") ? ctx.Attr<float>("fuse_beta") : 0.0f;

    const auto activation_map = phi::funcs::OneDNNActivationMap();
    const auto& activation_type = activation_map.find(fuse_activation);

    PADDLE_ENFORCE_NE(
        activation_type,
        activation_map.end(),
        phi::errors::InvalidArgument(
            "Activation '%s' not found in oneDNN algorithms mapper",
            fuse_activation));

    post_ops.append_eltwise(
        activation_scale, activation_type->second, fuse_alpha, fuse_beta);
  }

  // Correct the output scale to take into account the scaling of input and
  // weights. Since the data produced by the input and weight multiplication
  // is scaled by both of their scales, it needs to be divided by those
  // scales to normalise it back to its floating-point range, and then
  // multiplied by the desired output scale.
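  // In formula form (matching the code below):
  //   output_shift_scale[i] = scale_out / (Scale_in * Scale_weights[i])
  //   sum_scale             = scale_out / Scale_in_eltwise  (residual fuse)
  // where scale_out is Scale_out for a quantized output and 1.0f when the
  // output stays fp32 or an activation post-op consumes the scale instead.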
  std::tuple<std::vector<float>, float, float> GetOutputScales(
      const ExecutionContext& ctx) {
    if (ctx.HasAttr("Sum_scale")) {
      return std::make_tuple(ctx.Attr<std::vector<float>>("Output_shift_scale"),
                             ctx.Attr<float>("Sum_scale"),
                             ctx.Attr<float>("Activation_scale"));
    } else {
      auto scale_in_data = ctx.Attr<float>("Scale_in");
      auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
      bool has_activation = !ctx.Attr<std::string>("activation_type").empty() ||
                            (ctx.HasAttr("fuse_activation") &&
                             !ctx.Attr<std::string>("fuse_activation").empty());
      bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
      bool fuse_residual_conn = ctx.HasAttr("fuse_residual_connection") &&
                                ctx.Attr<bool>("fuse_residual_connection");
      auto scale_in_eltwise_data = ctx.HasAttr("Scale_in_eltwise")
                                       ? ctx.Attr<float>("Scale_in_eltwise")
                                       : 1.0f;

      // If the output will be in floats, we don't multiply by scale_out.

      float activation_scale = (!force_fp32_output && has_activation)
                                   ? ctx.Attr<float>("Scale_out")
                                   : 1.0f;
      float scale_out_data = (force_fp32_output || has_activation)
                                 ? 1.0f
                                 : ctx.Attr<float>("Scale_out");
      float sum_scale =
          fuse_residual_conn ? scale_out_data / scale_in_eltwise_data : 1.0f;
      const size_t weight_scales_num = scale_weights_data.size();

      for (size_t i = 0; i < weight_scales_num; ++i) {
        if (scale_weights_data[i] == 0.0)
          scale_weights_data[i] = scale_out_data;
        else
          scale_weights_data[i] =
              scale_out_data / (scale_in_data * scale_weights_data[i]);
      }
      return std::make_tuple(scale_weights_data, sum_scale, activation_scale);
    }
  }

  // Compute oneDNN's scaling mask, which determines along which dimension
  // the per-slice scaling should be applied.
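  // For example, mask = 0 applies a single common scale to the whole tensor,
  // while mask = 1 << 1 = 2 applies a separate scale along dimension 1
  // (e.g. per-output-channel weight scales).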
  int CreateMask(int slice_dimension, bool is_multi_channel_quantized) {
    return is_multi_channel_quantized ? 1 << slice_dimension : 0;
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryWithReorderAndAttrs(
      const dnnl::memory::desc& user_md,
      const dnnl::memory::desc& target_md,
      void* ptr,
      const dnnl::primitive_attr& attrs) {
    std::shared_ptr<dnnl::memory> target_memory_p;

    auto user_memory_p =
        std::make_shared<dnnl::memory>(user_md, this->engine_, ptr);
    target_memory_p = std::make_shared<dnnl::memory>(target_md, this->engine_);
    auto reorder_p = std::make_shared<dnnl::reorder>(
        *user_memory_p, *target_memory_p, attrs);

    auto& astream = OneDNNContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          1,
          platform::EventRole::kUniqueOp);
      reorder_p->execute(
          astream,
          {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::string memory_key_;
  const OneDNNContext& dev_ctx_;

 public:
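  // Flatten the input to the primitive's 2-D src shape and reorder it into
  // the memory layout chosen by the primitive descriptor.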
  std::shared_ptr<dnnl::memory> AcquireSrcMemoryWithReorder(
      const phi::DenseTensor* x) {
    const T_in* x_data = x->data<T_in>();

    auto user_md = x->mem_desc();
    if (x->dims().size() != 2) {
      // Reshape restrictions are always satisfied because, for 3- or 4-dim
      // inputs, a plain (non-blocked) layout is enforced
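      // e.g. (illustrative) an input of dims [2, 3, 4] with in_num_col_dims = 1
      // is viewed as the primitive's 2-D src of dims [2, 12] with no data copy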
      user_md = user_md.reshape(this->fwd_pd_->src_desc().dims());
    }

    return this->AcquireMemoryWithReorder(
        user_md, this->fwd_pd_->src_desc(), to_void_cast<T_in>(x_data));
  }

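  // Bias is provided as fp32. For int8 execution it is additionally scaled
  // into the accumulator domain (see GetBiasScales) during a cached reorder;
  // for fp32/bf16 it is used directly without a reorder.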
  std::shared_ptr<dnnl::memory> AcquireBiasMemoryWithReorder(
      const ExecutionContext& ctx, const phi::DenseTensor* bias) {
    const float* bias_data = bias->data<float>();

    if (phi::funcs::is_int8<T_w>() == false) {
      // for BF16/FP32 bias is 1D and has no scales, so reorder is not needed
      return this->AcquireMemoryFromPrimitive(this->fwd_pd_->bias_desc(),
                                              to_void_cast<float>(bias_data));
    } else {
      const std::string bias_key = this->memory_key_ + "@bias";
      auto memory_p = std::static_pointer_cast<dnnl::memory>(
          this->dev_ctx_.GetBlob(bias_key));

      if (!memory_p) {
        const auto& scale_data = GetBiasScales(ctx);
        dnnl::primitive_attr attrs;

        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        auto user_md = dnnl::memory::desc({bias->dims()[0]},
                                          OneDNNGetDataType<float>(),
                                          dnnl::memory::format_tag::a);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->bias_desc(),
            to_void_cast<float>(bias_data),
            attrs);
        this->dev_ctx_.SetBlob(bias_key, memory_p);
      }
      return memory_p;
    }
  }

  std::shared_ptr<dnnl::memory> AcquireWeightsMemoryWithReorder(
      const phi::DenseTensor* weights, const std::vector<float>& scale_data) {
    const std::string weights_key = this->memory_key_ + "@weights";
    auto memory_p = std::static_pointer_cast<dnnl::memory>(
        this->dev_ctx_.GetBlob(weights_key));

    if (!memory_p) {
      const float* weights_data = weights->data<float>();
      auto weights_dims = this->fwd_pd_->weights_desc().dims();

      auto user_md = dnnl::memory::desc(weights_dims,
                                        OneDNNGetDataType<float>(),
                                        dnnl::memory::format_tag::io);

      if (phi::funcs::is_int8<T_w>()) {
        dnnl::primitive_attr attrs;
        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->weights_desc(),
            to_void_cast<float>(weights_data),
            attrs);
      } else {
        memory_p =
            this->AcquireMemoryWithReorder(user_md,
                                           this->fwd_pd_->weights_desc(),
                                           to_void_cast<float>(weights_data));
      }

      this->dev_ctx_.SetBlob(weights_key, memory_p);
    }
    return memory_p;
  }

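  // When a residual connection is fused, the output shares the residual
  // tensor's buffer so that the appended sum post-op accumulates into it
  // in place.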
  std::shared_ptr<dnnl::memory> AcquireCustomDstMemory(
      const ExecutionContext& ctx, phi::DenseTensor* out) {
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      auto* residual_param = ctx.Input<phi::DenseTensor>("ResidualData");

      PADDLE_ENFORCE_EQ(
          out->dims(),
          residual_param->dims(),
          phi::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, but got output's dimension = %d"
              " and residual param's dimension = %d.",
              out->dims().size(),
              residual_param->dims().size()));

      out->ShareDataWith(*residual_param);
    }
    return this->template AcquireDstMemory<T_out>(out);
  }
};  // class FCMKLDNNHandler

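// Picks the weights type used to instantiate RunKernel: quantized (uint8)
// inputs imply int8 weights, otherwise the weights keep the input type. The
// lambda lets both branches be compiled with their own T_w.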
#define IF_CHANGE_FC_TW_TYPENAME(condition, ...) \
  if (condition) {                               \
    using T_w = int8_t;                          \
    __VA_ARGS__();                               \
  } else {                                       \
    using T_w = T_in;                            \
    __VA_ARGS__();                               \
  }

template <typename T_in>
class FCMKLDNNKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const ExecutionContext& ctx) const override {
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";

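    // Pick the output type: force_fp32_output wins; otherwise int8 inputs
    // produce uint8 (when ReLU is fused) or int8 output, and fp32/bf16
    // inputs keep their input type.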
    IF_CHANGE_FC_TW_TYPENAME((std::is_same<T_in, uint8_t>::value), ([&] {
                               if (force_fp32_output) {
                                 this->RunKernel<float, T_w>(ctx);
                               } else if (phi::funcs::is_int8<T_in>()) {
                                 if (fuse_relu) {
                                   this->RunKernel<uint8_t, T_w>(ctx);
                                 } else {
                                   this->RunKernel<int8_t, T_w>(ctx);
                                 }
                               } else {
                                 this->RunKernel<T_in, T_w>(ctx);
                               }
                             }));
  }

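  // Point the cached src memory at the current input, reordering only when
  // the input's memory descriptor no longer matches the cached one.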
  void PrepareSrcMem(const std::shared_ptr<dnnl::inner_product_forward>& fc_p,
                     const std::shared_ptr<dnnl::memory>& src_mem,
                     const phi::DenseTensor* x,
                     const dnnl::engine& engine) const {
    auto x_md = x->mem_desc().reshape(src_mem->get_desc().dims());
    if (x_md != src_mem->get_desc()) {
      dnnl::memory x_mem(x_md, engine, to_void_cast<T_in>(x->data<T_in>()));
      auto reorder_p = dnnl::reorder(x_mem, *src_mem);

      auto& astream = OneDNNContext::tls().get_stream();
      reorder_p.execute(astream, x_mem, *src_mem);
      astream.wait();
    } else {
      src_mem->set_data_handle(to_void_cast<T_in>(x->data<T_in>()));
    }
  }

  template <typename T_out, typename T_w>
  void RunKernel(const ExecutionContext& ctx) const {
    const auto& dev_ctx = ctx.template device_context<OneDNNContext>();
    const auto& onednn_engine = dev_ctx.GetEngine();

    const auto* x = ctx.Input<phi::DenseTensor>("Input");
    const auto* weights = ctx.Input<phi::DenseTensor>("W");
    const auto* bias = ctx.Input<phi::DenseTensor>("Bias");
    auto out = ctx.Output<phi::DenseTensor>("Out");

    const auto& scale_weights = ctx.Attr<std::vector<float>>("Scale_weights");

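    // The primitive and its memory objects are cached (keyed on the Input/W
    // variable names, the input dims and, if needed, the thread id), so
    // repeated executions with the same shapes skip primitive re-creation.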
    std::shared_ptr<dnnl::inner_product_forward> fc_p;
    std::shared_ptr<dnnl::memory> src_memory_p;
    std::shared_ptr<dnnl::memory> weights_memory_p;
    std::shared_ptr<dnnl::memory> bias_memory_p;
    std::shared_ptr<dnnl::memory> dst_memory_p;

    std::string cache_key;
    cache_key.reserve(64);
    cache_key = phi::funcs::ExtendKeyWithThreadInfoIfNeeded(
        dev_ctx,
        phi::funcs::CreateKey(dev_ctx,
                              ctx.InputName("Input"),
                              ctx.InputName("W"),
                              phi::vectorize(x->dims())));

    auto inner_product_cache =
        std::static_pointer_cast<InnerProductCache>(dev_ctx.GetBlob(cache_key));

    RecomputeOutputDims(ctx, x, weights, out);

    if (inner_product_cache) {
      fc_p = std::make_shared<dnnl::inner_product_forward>(
          inner_product_cache->inner_product_p);
      src_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->src_mem);
      PrepareSrcMem(fc_p, src_memory_p, x, onednn_engine);

      weights_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->weights_mem);

      dst_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->dst_mem);
      if (ctx.HasAttr("fuse_residual_connection") &&
          ctx.Attr<bool>("fuse_residual_connection")) {
        auto* residual_param = ctx.Input<phi::DenseTensor>("ResidualData");
        out->ShareDataWith(*residual_param);
      }
      auto out_ptr = out->mutable_data<T_out>(
          ctx.GetPlace(), dst_memory_p->get_desc().get_size());
      dst_memory_p->set_data_handle(out_ptr);

      if (bias) {
        bias_memory_p =
            std::make_shared<dnnl::memory>(inner_product_cache->bias_mem);
      }
    } else {
      auto in_col_dims = ctx.Attr<int>("in_num_col_dims");

      FCMKLDNNHandler<T_in, T_w, T_out> handler(ctx,
                                                dev_ctx,
                                                x,
                                                weights,
                                                bias,
                                                out,
                                                in_col_dims,
                                                onednn_engine,
                                                ctx.GetPlace());

      src_memory_p = handler.AcquireSrcMemoryWithReorder(x);
      weights_memory_p =
          handler.AcquireWeightsMemoryWithReorder(weights, scale_weights);
      dst_memory_p = handler.AcquireCustomDstMemory(ctx, out);

      if (bias) {
        bias_memory_p = handler.AcquireBiasMemoryWithReorder(ctx, bias);
      }

      fc_p = handler.AcquireForwardPrimitive();
    }

    auto& astream = OneDNNContext::tls().get_stream();

    std::unordered_map<int, dnnl::memory> fc_args = {
        {DNNL_ARG_SRC, *src_memory_p},
        {DNNL_ARG_WEIGHTS, *weights_memory_p},
        {DNNL_ARG_DST, *dst_memory_p}};

    if (bias) {
      fc_args.insert({DNNL_ARG_BIAS, *bias_memory_p});
    }

    fc_p->execute(astream, fc_args);
    astream.wait();

    if (!inner_product_cache) {
      auto ip_cache = std::make_shared<InnerProductCache>();
      ip_cache->inner_product_p = *fc_p;
      ip_cache->src_mem = *src_memory_p;
      ip_cache->weights_mem = *weights_memory_p;
      ip_cache->dst_mem = *dst_memory_p;
      if (bias) {
        ip_cache->bias_mem = *bias_memory_p;
      }
      dev_ctx.SetBlob(cache_key, ip_cache);
    }

    const auto out_md =
        dst_memory_p->get_desc().reshape(phi::vectorize(out->dims()));

    if (ctx.HasAttr("fused_reshape2_shape")) {
      phi::funcs::SetOutMemDescWithReshape2FuseSupport(
          ctx.Attr<std::vector<int>>("fused_reshape2_shape"), out, out_md);
    } else {
      out->set_mem_desc(out_md);
    }
  }

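  // Recompute Out's dims from the original (possibly >2-D) input and weights
  // shapes, since the kernel itself works on the flattened 2-D view.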
  void RecomputeOutputDims(const ExecutionContext& ctx,
                           const phi::DenseTensor* x,
                           const phi::DenseTensor* weights,
                           phi::DenseTensor* out) const {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights,
                      false,
                      phi::errors::PermissionDenied(
                          "Weight padding in fc can not be used in oneDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(x->dims(),
                 weights->dims(),
                 output_dims,
                 in_num_col_dims,
                 padding_weights);
    out->Resize(phi::make_ddim(output_dims));
    out->set_lod(x->lod());
  }
};

}  // namespace operators
}  // namespace paddle

// FC weights are by default stored as fp32; the weight-type template argument
// determines their destination data type, i.e. the type that is eventually
// used during the kernel's computations.
namespace ops = paddle::operators;

REGISTER_OP_KERNEL(fc,
                   MKLDNN,
                   ::phi::CPUPlace,
                   ops::FCMKLDNNKernel<float>,
                   ops::FCMKLDNNKernel<paddle::platform::bfloat16>,
                   ops::FCMKLDNNKernel<uint8_t>,
                   ops::FCMKLDNNKernel<int8_t>);