/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>

#include "paddle/fluid/operators/fc_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using dnnl::inner_product_forward;
using dnnl::memory;
using dnnl::primitive;
using dnnl::prop_kind;
using dnnl::stream;
using framework::DDim;
using framework::ExecutionContext;
using LoDTensor = phi::DenseTensor;
using phi::funcs::OneDNNGetDataType;
using phi::funcs::to_void_cast;
using platform::MKLDNNDeviceContext;

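// Caches the oneDNN inner_product primitive and its memory objects for a
// given FC instance so that subsequent executions can reuse them.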
struct InnerProductCache {
  dnnl::inner_product_forward inner_product_p;
  dnnl::memory src_mem;
  dnnl::memory weights_mem;
  dnnl::memory bias_mem;
  dnnl::memory dst_mem;
};
template <typename T_in, typename T_w, typename T_out>
class FCMKLDNNHandler
    : public phi::funcs::OneDNNHandlerNoCachingT<T_in,
                                                 dnnl::inner_product_forward> {
 public:
  FCMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                  const platform::MKLDNNDeviceContext& dev_ctx,
                  const phi::DenseTensor* x,
                  const phi::DenseTensor* weights,
                  const phi::DenseTensor* bias,
                  phi::DenseTensor* out,
                  const int in_num_col_dims,
                  dnnl::engine mkldnn_engine,
                  platform::Place cpu_place)
      : phi::funcs::OneDNNHandlerNoCachingT<T_in, dnnl::inner_product_forward>(
            mkldnn_engine, cpu_place),
        dev_ctx_(dev_ctx) {
    this->memory_key_ = ctx.InputName("W");

    auto x_vec_dims = phi::vectorize(x->dims());
    auto weights_vec_dims = phi::vectorize(weights->dims());

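    // Flatten the input to the 2-D [MB, IC] shape expected by inner_product:
    // MB collapses the first `in_num_col_dims` dimensions and IC collapses
    // the remaining ones; OC is taken from the weights' second dimension.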
    int MB = 1;
    for (int i = 0; i < in_num_col_dims; ++i) {
      MB *= x_vec_dims[i];
    }

    int IC = 1;
    for (size_t i = in_num_col_dims; i < x_vec_dims.size(); ++i) {
      IC *= x_vec_dims[i];
    }

    int OC = weights_vec_dims[1];

    dnnl::memory::desc bias_md;

    auto src_md = dnnl::memory::desc(
        {MB, IC}, OneDNNGetDataType<T_in>(), dnnl::memory::format_tag::any);
    auto weights_md = dnnl::memory::desc(
        {OC, IC}, OneDNNGetDataType<T_w>(), dnnl::memory::format_tag::any);
    auto dst_md = dnnl::memory::desc(
        {MB, OC}, OneDNNGetDataType<T_out>(), dnnl::memory::format_tag::any);
    if (bias) {
      bias_md = dnnl::memory::desc({bias->numel()},
                                   OneDNNGetDataType<float>(),
                                   dnnl::memory::format_tag::a);
    }

    const auto attrs = CreateFCAttrs(ctx);

    this->AcquireForwardPrimitiveDescriptor(attrs,
                                            prop_kind::forward_inference,
                                            src_md,
                                            weights_md,
                                            bias_md,
                                            dst_md);
  }

 private:
  dnnl::primitive_attr CreateFCAttrs(const ExecutionContext& ctx) {
    dnnl::primitive_attr attributes;
    dnnl::post_ops post_operations;

    float sum_scale = 1.0f;
    float activation_scale = 1.0f;
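    // For int8 execution, fold the input/weight/output quantization scales
    // into the primitive's output_scales attribute.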
    if (phi::funcs::is_int8<T_w>()) {
      std::vector<float> output_shift_scale;
      std::tie(output_shift_scale, sum_scale, activation_scale) =
          GetOutputScales(ctx);
      int mask = CreateMask(1, output_shift_scale.size() > 1);
      attributes.set_output_scales(mask, output_shift_scale);
    }

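    // A fused residual connection becomes a sum post-op; sum_scale rescales
    // the residual data into the output's quantization scale.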
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      post_operations.append_sum(sum_scale);
    }

    // ReLU from "fc_fuse_pass"
    if (ctx.Attr<std::string>("activation_type") == "relu") {
      post_operations.append_eltwise(
          activation_scale, dnnl::algorithm::eltwise_relu, 0.0f, 0.0f);
    }
    platform::AppendActivation(ctx, post_operations, activation_scale);

    if (ctx.HasAttr("fused_output_scale")) {
      float scale_alpha = ctx.Attr<float>("fused_output_scale");
      post_operations.append_eltwise(
          1.0, dnnl::algorithm::eltwise_linear, scale_alpha, 0.0f);
    }

    attributes.set_post_ops(post_operations);
    return attributes;
  }

  // Compute the bias scales so that their values correspond to the scale of
  // the data produced by the input and weights multiplication.
  std::vector<float> GetBiasScales(const framework::ExecutionContext& ctx) {
    if (ctx.HasAttr("Bias_scales")) {
      return ctx.Attr<std::vector<float>>("Bias_scales");
    } else {
      const float scale_in = ctx.Attr<float>("Scale_in");
      const auto& scale_weights = ctx.Attr<std::vector<float>>("Scale_weights");
      std::vector<float> bias_scales(scale_weights.size());

      for (size_t i = 0; i < bias_scales.size(); ++i) {
        if (scale_weights[i] == 0.0)
          bias_scales[i] = 1.0f;
        else
          bias_scales[i] = scale_in * scale_weights[i];
      }
      return bias_scales;
    }
  }

  // Correct the output scale to account for the scaling of input and weights.
  // Since the data that comes out of the input and weight multiplication is
  // scaled with their own scales, it needs to be divided by those scales to
  // normalise it back to its floating-point range, and is then multiplied by
  // the desired output scale.
  std::tuple<std::vector<float>, float, float> GetOutputScales(
      const ExecutionContext& ctx) {
    if (ctx.HasAttr("Sum_scale")) {
      return std::make_tuple(ctx.Attr<std::vector<float>>("Output_shift_scale"),
                             ctx.Attr<float>("Sum_scale"),
                             ctx.Attr<float>("Activation_scale"));
    } else {
      auto scale_in_data = ctx.Attr<float>("Scale_in");
      auto scale_weights_data = ctx.Attr<std::vector<float>>("Scale_weights");
      bool has_activation = !ctx.Attr<std::string>("activation_type").empty();
      bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
      bool fuse_residual_conn = ctx.HasAttr("fuse_residual_connection") &&
                                ctx.Attr<bool>("fuse_residual_connection");
      auto scale_in_eltwise_data = ctx.HasAttr("Scale_in_eltwise")
                                       ? ctx.Attr<float>("Scale_in_eltwise")
                                       : 1.0f;

      // If the output will be in floats, we don't multiply by scale_out.

      float activation_scale = (!force_fp32_output && has_activation)
                                   ? ctx.Attr<float>("Scale_out")
                                   : 1.0f;
      float scale_out_data = (force_fp32_output || has_activation)
                                 ? 1.0f
                                 : ctx.Attr<float>("Scale_out");
      float sum_scale =
          fuse_residual_conn ? scale_out_data / scale_in_eltwise_data : 1.0f;
      const size_t weight_scales_num = scale_weights_data.size();

      for (size_t i = 0; i < weight_scales_num; ++i) {
        if (scale_weights_data[i] == 0.0)
          scale_weights_data[i] = scale_out_data;
        else
          scale_weights_data[i] =
              scale_out_data / (scale_in_data * scale_weights_data[i]);
      }
      return std::make_tuple(scale_weights_data, sum_scale, activation_scale);
    }
  }

  // Compute MKL-DNN's scaling mask, which determines along which dimension
  // slices the scaling should be applied. For more details please refer to:
  // https://intel.github.io/mkl-dnn/group__c__api__attributes.html
  // Section dnnl_status_t DNNL_API dnnl_primitive_attr_set_output_scales
  int CreateMask(int slice_dimension, bool is_multi_channel_quantizied) {
    return is_multi_channel_quantizied ? 1 << slice_dimension : 0;
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryWithReorderAndAttrs(
      const dnnl::memory::desc& user_md,
      const dnnl::memory::desc& target_md,
      void* ptr,
      const dnnl::primitive_attr& attrs) {
    std::shared_ptr<dnnl::memory> target_memory_p;

    auto user_memory_p =
        std::make_shared<dnnl::memory>(user_md, this->engine_, ptr);
    target_memory_p = std::make_shared<dnnl::memory>(target_md, this->engine_);
    auto reorder_p = std::make_shared<dnnl::reorder>(
        *user_memory_p, *target_memory_p, attrs);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
    {
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          1,
          platform::EventRole::kUniqueOp);
      reorder_p->execute(
          astream,
          {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::string memory_key_;
  const platform::MKLDNNDeviceContext& dev_ctx_;

 public:
  std::shared_ptr<dnnl::memory> AcquireSrcMemoryWithReorder(
      const phi::DenseTensor* x) {
    const T_in* x_data = x->data<T_in>();

    auto user_md = x->mem_desc();
    if (x->dims().size() != 2) {
      // reshape restrictions are always satisfied because in case of 3 or 4 dim
      // input, plain layout is enforced
      user_md = user_md.reshape(this->fwd_pd_->src_desc().dims());
    }

    return this->AcquireMemoryWithReorder(
        user_md, this->fwd_pd_->src_desc(), to_void_cast<T_in>(x_data));
  }

  std::shared_ptr<dnnl::memory> AcquireBiasMemoryWithReorder(
      const framework::ExecutionContext& ctx, const phi::DenseTensor* bias) {
    const float* bias_data = bias->data<float>();

    if (phi::funcs::is_int8<T_w>() == false) {
      // for BF16/FP32 bias is 1D and has no scales, so reorder is not needed
      return this->AcquireMemoryFromPrimitive(this->fwd_pd_->bias_desc(),
                                              to_void_cast<float>(bias_data));
    } else {
      const std::string bias_key = this->memory_key_ + "@bias";
      auto memory_p = std::static_pointer_cast<dnnl::memory>(
          this->dev_ctx_.GetBlob(bias_key));

      if (!memory_p) {
        const auto& scale_data = GetBiasScales(ctx);
        dnnl::primitive_attr attrs;

        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        auto user_md = dnnl::memory::desc({bias->dims()[0]},
                                          OneDNNGetDataType<float>(),
                                          dnnl::memory::format_tag::a);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->bias_desc(),
            to_void_cast<float>(bias_data),
            attrs);
        this->dev_ctx_.SetBlob(bias_key, memory_p);
      }
      return memory_p;
    }
  }

  std::shared_ptr<dnnl::memory> AcquireWeightsMemoryWithReorder(
      const phi::DenseTensor* weights, const std::vector<float>& scale_data) {
    const std::string weights_key = this->memory_key_ + "@weights";
    auto memory_p = std::static_pointer_cast<dnnl::memory>(
        this->dev_ctx_.GetBlob(weights_key));

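    // On a cache miss, reorder the fp32 weights (stored in `io` format) into
    // the layout chosen by the primitive descriptor, quantizing them when
    // running int8, and cache the reordered memory.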
    if (!memory_p) {
      const float* weights_data = weights->data<float>();
      auto weights_dims = this->fwd_pd_->weights_desc().dims();

      auto user_md = dnnl::memory::desc(weights_dims,
                                        OneDNNGetDataType<float>(),
                                        dnnl::memory::format_tag::io);

      if (phi::funcs::is_int8<T_w>()) {
        dnnl::primitive_attr attrs;
        int mask = CreateMask(0, scale_data.size() > 1);
        attrs.set_output_scales(mask, scale_data);

        memory_p = this->AcquireMemoryWithReorderAndAttrs(
            user_md,
            this->fwd_pd_->weights_desc(),
            to_void_cast<float>(weights_data),
            attrs);
      } else {
        memory_p =
            this->AcquireMemoryWithReorder(user_md,
                                           this->fwd_pd_->weights_desc(),
                                           to_void_cast<float>(weights_data));
      }

      this->dev_ctx_.SetBlob(weights_key, memory_p);
    }
    return memory_p;
  }

  std::shared_ptr<dnnl::memory> AcquireCustomDstMemory(
      const ExecutionContext& ctx, phi::DenseTensor* out) {
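    // When a residual connection is fused, the output shares the residual
    // tensor's buffer so that the sum post-op accumulates in place.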
    if (ctx.HasAttr("fuse_residual_connection") &&
        ctx.Attr<bool>("fuse_residual_connection")) {
      auto* residual_param = ctx.Input<phi::DenseTensor>("ResidualData");

      PADDLE_ENFORCE_EQ(
          out->dims(),
          residual_param->dims(),
          platform::errors::InvalidArgument(
              "Output and elementwise parameter need to have the "
              "same dimension sizes, but got output's dimension = %d"
              " and residual param's dimension = %d.",
              out->dims().size(),
              residual_param->dims().size()));

      out->ShareDataWith(*residual_param);
    }
    return this->template AcquireDstMemory<T_out>(out);
  }
};

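// Selects the weight data type at compile time: an unsigned int8 (uint8_t)
// input implies int8_t weights, otherwise the weights use the same type as
// the input.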
#define IF_CHANGE_FC_TW_TYPENAME(condition, ...) \
  if (condition) {                               \
    using T_w = int8_t;                          \
    __VA_ARGS__();                               \
  } else {                                       \
    using T_w = T_in;                            \
    __VA_ARGS__();                               \
  }

template <typename T_in>
class FCMKLDNNKernel : public framework::OpKernel<T_in> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
    bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";

    IF_CHANGE_FC_TW_TYPENAME((std::is_same<T_in, uint8_t>::value), ([&] {
                               if (force_fp32_output) {
                                 this->RunKernel<float, T_w>(ctx);
                               } else if (phi::funcs::is_int8<T_in>()) {
                                 if (fuse_relu) {
                                   this->RunKernel<uint8_t, T_w>(ctx);
                                 } else {
                                   this->RunKernel<int8_t, T_w>(ctx);
                                 }
                               } else {
                                 this->RunKernel<T_in, T_w>(ctx);
                               }
                             }));
  }

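  // Rebinds the cached source memory to the current input tensor; a reorder
  // is issued only when the input's memory descriptor differs from the one
  // stored in the cached primitive.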
  void PrepareSrcMem(const std::shared_ptr<inner_product_forward>& fc_p,
                     const std::shared_ptr<dnnl::memory>& src_mem,
                     const LoDTensor* x,
                     const dnnl::engine& engine) const {
    auto x_md = x->mem_desc().reshape(src_mem->get_desc().dims());
    if (x_md != src_mem->get_desc()) {
      dnnl::memory x_mem(x_md, engine, to_void_cast<T_in>(x->data<T_in>()));
      auto reorder_p = dnnl::reorder(x_mem, *src_mem);

      auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();
      reorder_p.execute(astream, x_mem, *src_mem);
      astream.wait();
    } else {
      src_mem->set_data_handle(to_void_cast<T_in>(x->data<T_in>()));
    }
  }

  template <typename T_out, typename T_w>
  void RunKernel(const framework::ExecutionContext& ctx) const {
    const auto& dev_ctx =
        ctx.template device_context<platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();

    const auto* x = ctx.Input<LoDTensor>("Input");
    const auto* weights = ctx.Input<phi::DenseTensor>("W");
    const auto* bias = ctx.Input<phi::DenseTensor>("Bias");
    auto out = ctx.Output<LoDTensor>("Out");

    const auto& scale_weights = ctx.Attr<std::vector<float>>("Scale_weights");

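    // The primitive and its memory objects are cached per
    // (input name, weights name, input dims) key so that subsequent
    // iterations skip descriptor creation and weight reordering.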
    std::shared_ptr<dnnl::inner_product_forward> fc_p;
    std::shared_ptr<dnnl::memory> src_memory_p;
    std::shared_ptr<dnnl::memory> weights_memory_p;
    std::shared_ptr<dnnl::memory> bias_memory_p;
    std::shared_ptr<dnnl::memory> dst_memory_p;

    std::string cache_key;
    cache_key.reserve(64);
    cache_key = platform::ExtendKeyWithThreadInfoIfNeeded(
        dev_ctx,
        platform::CreateKey(dev_ctx,
                            ctx.InputName("Input"),
                            ctx.InputName("W"),
                            phi::vectorize(x->dims())));

    auto inner_product_cache =
        std::static_pointer_cast<InnerProductCache>(dev_ctx.GetBlob(cache_key));

    RecomputeOutputDims(ctx, x, weights, out);

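    // Cache hit: reuse the stored primitive and memories, refreshing only the
    // data handles so they point at the current input/output buffers.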
    if (inner_product_cache) {
      fc_p = std::make_shared<dnnl::inner_product_forward>(
          inner_product_cache->inner_product_p);
      src_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->src_mem);
      PrepareSrcMem(fc_p, src_memory_p, x, mkldnn_engine);

      weights_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->weights_mem);

      dst_memory_p =
          std::make_shared<dnnl::memory>(inner_product_cache->dst_mem);
      if (ctx.HasAttr("fuse_residual_connection") &&
          ctx.Attr<bool>("fuse_residual_connection")) {
        auto* residual_param = ctx.Input<phi::DenseTensor>("ResidualData");
        out->ShareDataWith(*residual_param);
      }
      auto out_ptr = out->mutable_data<T_out>(
          ctx.GetPlace(), dst_memory_p->get_desc().get_size());
      dst_memory_p->set_data_handle(out_ptr);

      if (bias) {
        bias_memory_p =
            std::make_shared<dnnl::memory>(inner_product_cache->bias_mem);
      }
    } else {
      auto in_col_dims = ctx.Attr<int>("in_num_col_dims");

      FCMKLDNNHandler<T_in, T_w, T_out> handler(ctx,
                                                dev_ctx,
                                                x,
                                                weights,
                                                bias,
                                                out,
                                                in_col_dims,
                                                mkldnn_engine,
                                                ctx.GetPlace());

      src_memory_p = handler.AcquireSrcMemoryWithReorder(x);
      weights_memory_p =
          handler.AcquireWeightsMemoryWithReorder(weights, scale_weights);
      dst_memory_p = handler.AcquireCustomDstMemory(ctx, out);

      if (bias) {
        bias_memory_p = handler.AcquireBiasMemoryWithReorder(ctx, bias);
      }

      fc_p = handler.AcquireForwardPrimitive();
    }

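    // Assemble the primitive arguments and execute the FC primitive on the
    // oneDNN stream.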
    auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();

    std::unordered_map<int, dnnl::memory> fc_args = {
        {DNNL_ARG_SRC, *src_memory_p},
        {DNNL_ARG_WEIGHTS, *weights_memory_p},
        {DNNL_ARG_DST, *dst_memory_p}};

    if (bias) {
      fc_args.insert({DNNL_ARG_BIAS, *bias_memory_p});
    }

    fc_p->execute(astream, fc_args);
    astream.wait();

    if (!inner_product_cache) {
      auto ip_cache = std::make_shared<InnerProductCache>();
      ip_cache->inner_product_p = *fc_p;
      ip_cache->src_mem = *src_memory_p;
      ip_cache->weights_mem = *weights_memory_p;
      ip_cache->dst_mem = *dst_memory_p;
      if (bias) {
        ip_cache->bias_mem = *bias_memory_p;
      }
      dev_ctx.SetBlob(cache_key, ip_cache);
    }

    platform::SetOutMemDescWithLogicalLayoutFusesSupport(
        ctx,
        out,
        dst_memory_p->get_desc().reshape(phi::vectorize(out->dims())));
  }

  void RecomputeOutputDims(const ExecutionContext& ctx,
                           const LoDTensor* x,
                           const phi::DenseTensor* weights,
                           LoDTensor* out) const {
    int in_num_col_dims = ctx.Attr<int>("in_num_col_dims");
    bool padding_weights = ctx.Attr<bool>("padding_weights");
    PADDLE_ENFORCE_EQ(padding_weights,
                      false,
                      platform::errors::PermissionDenied(
                          "Weight padding in fc can not be used in MKLDNN."));
    std::vector<int64_t> output_dims;
    FCOutputSize(x->dims(),
                 weights->dims(),
                 output_dims,
                 in_num_col_dims,
                 padding_weights);
    out->Resize(phi::make_ddim(output_dims));
    out->set_lod(x->lod());
  }
};

}  // namespace operators
}  // namespace paddle

// FC weights are stored as fp32 by default; the weight-type template argument
// determines the data type they are converted to, i.e. the type that is
// eventually used during the kernel's computation.
namespace ops = paddle::operators;

REGISTER_OP_KERNEL(fc,
                   MKLDNN,
                   ::paddle::platform::CPUPlace,
                   ops::FCMKLDNNKernel<float>,
                   ops::FCMKLDNNKernel<paddle::platform::bfloat16>,
                   ops::FCMKLDNNKernel<uint8_t>,
                   ops::FCMKLDNNKernel<int8_t>);