// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h"

#include <sstream>
#include <utility>
#include <vector>

#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/string/pretty_log.h"

namespace paddle {
namespace framework {
namespace ir {

using EigenVectorArrayMap = Eigen::Map<Eigen::Array<double, Eigen::Dynamic, 1>>;
using EigenVectorArrayMapFloat =
    Eigen::Map<Eigen::Array<float, Eigen::Dynamic, 1>>;
using string::PrettyLogDetail;

namespace {

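// Removes the edge between nodes `a` and `b` from both adjacency lists.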
void UnlinkNodes(ir::Node* a, ir::Node* b) {
  a->outputs.erase(std::remove(a->outputs.begin(), a->outputs.end(), b),
                   a->outputs.end());
  b->inputs.erase(std::remove(b->inputs.begin(), b->inputs.end(), a),
                  b->inputs.end());
}

void LogCannotQuantizeOp(Node* op, const char* details = nullptr) {
  std::stringstream msg_ss;
  msg_ss << "Cannot quantize operator " << op->Name()
         << " (type: " << op->Op()->Type() << ", id: " << op->id() << ").";
  if (details) msg_ss << " " << details;
  PrettyLogDetail(msg_ss.str().c_str());
}

void LogScaleIsMissingForVarName(const std::string& name) {
  VLOG(4) << "Quantization scale for the variable " << name << " is missing.";
}

void LogScaleIsMissingForVarNode(Node* node) {
  LogScaleIsMissingForVarName(node->Name());
}

void LogQuantizationDisabled(Node* op) {
  VLOG(4) << "Quantization skipped for operator " << op->Name()
          << " (type: " << op->Op()->Type() << ", id: " << op->id()
          << "). Attribute mkldnn_data_type != \"int8\".";
}

}  // namespace

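// Maximum values representable in unsigned and signed 8-bit integers.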
enum { U8_MAX = 255, S8_MAX = 127 };

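// Inserts a quantize op between `input` and `op`. The quantize scale is
// scale_to_one multiplied by the integer maximum of the target type, and the
// scale and shift are optionally stored as attributes of `op` as well.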
void CPUQuantizePass::QuantizeInput(Graph* g, Node* op, Node* input,
                                    std::string input_name, double scale_to_one,
                                    bool is_input_unsigned,
                                    std::string scale_attr_name, float shift,
                                    std::string shift_attr_name) const {
  auto inputs = op->Op()->InputNames();
  bool name_found =
      std::find(inputs.begin(), inputs.end(), input_name) != inputs.end();
  PADDLE_ENFORCE_EQ(name_found, true,
                    platform::errors::InvalidArgument(
                        "Var(%s) isn't the input of the %s operator.",
                        input_name, op->Op()->Type()));
  unsigned max = is_input_unsigned ? U8_MAX : S8_MAX;
  float scale = scale_to_one * max;

  // Create quantize output variable
  VarDesc quantize_out_desc(patterns::PDNodeName("quantize", "out"));
  auto* quantize_out_node = g->CreateVarNode(&quantize_out_desc);

  // create a quantize op node
  OpDesc q_desc;
  q_desc.SetType("quantize");
  q_desc.SetInput("Input", std::vector<std::string>({input->Name()}));
  q_desc.SetOutput("Output",
                   std::vector<std::string>({quantize_out_node->Name()}));
  q_desc.SetAttr("Scale", scale);
  q_desc.SetAttr("Shift", shift);
  q_desc.SetAttr("is_negative_input", !is_input_unsigned);

  // fix for fc's output format: with in_num_col_dims == 2, fc needs "NCHW"
  // as the default instead of "NHWC"
  if (op->Op()->Type() == "fc" &&
      op->Op()->GetAttrIfExists<int>("in_num_col_dims") == 2) {
    q_desc.SetAttr("output_format", Has("data_layout")
                                        ? Get<std::string>("data_layout")
                                        : "NCHW");
  } else {
    q_desc.SetAttr("output_format", Has("data_layout")
                                        ? Get<std::string>("data_layout")
                                        : "NHWC");
  }
  auto quantize_op = g->CreateOpNode(&q_desc);  // OpDesc will be copied.

  // update op's input
  op->Op()->SetInput(input_name,
                     std::vector<std::string>({quantize_out_node->Name()}));

  // link quantize op
  UnlinkNodes(input, op);
  IR_NODE_LINK_TO(input, quantize_op);
  IR_NODE_LINK_TO(quantize_op, quantize_out_node);
  IR_NODE_LINK_TO(quantize_out_node, op);

  if (!scale_attr_name.empty()) op->Op()->SetAttr(scale_attr_name, scale);
  if (!shift_attr_name.empty()) op->Op()->SetAttr(shift_attr_name, shift);
}

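// Variant of QuantizeInput for ops that gather a variable number of inputs
// under a single name (e.g. concat's "X"): every input gets its own quantize
// op, all sharing the scale derived from the op's single output.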
void CPUQuantizePass::QuantizeInputs(Graph* g, Node* op, std::string input_name,
                                     bool are_inputs_unsigned,
                                     std::string scale_attr_name, float shift,
                                     std::string shift_attr_name) const {
  auto inputs = op->inputs;
  auto output = op->outputs[0];
  PADDLE_ENFORCE_GE(inputs.size(), 1,
                    platform::errors::InvalidArgument(
                        "OP(%s)'s number of inputs(%d) must be greater than "
                        "or equal to 1.",
                        op->Name(), inputs.size()));
  PADDLE_ENFORCE_EQ(op->outputs.size(), 1,
                    platform::errors::InvalidArgument(
                        "OP(%s)'s outputs(%d) must be equal to 1.", op->Name(),
                        op->outputs.size()));

  // create a quantize op desc prototype
  OpDesc q_desc;
  q_desc.SetType("quantize");

  std::vector<Node*> quantize_out_nodes(inputs.size());
  std::vector<std::string> quantize_out_node_names(inputs.size());

  double scale_out = GetScaleValueForNode(output);
  unsigned max = are_inputs_unsigned ? U8_MAX : S8_MAX;
  float scale = scale_out * max;

  for (size_t i = 0; i < inputs.size(); i++) {
    // Create quantize output variable
    VarDesc quantize_out_desc(patterns::PDNodeName("quantize", "out"));
    quantize_out_nodes[i] = g->CreateVarNode(&quantize_out_desc);
    quantize_out_node_names[i] = quantize_out_nodes[i]->Name();

    q_desc.SetAttr("Scale", scale);
    q_desc.SetAttr("Shift", shift);
    q_desc.SetInput("Input", std::vector<std::string>({inputs[i]->Name()}));
    q_desc.SetOutput("Output",
                     std::vector<std::string>({quantize_out_node_names[i]}));
    q_desc.SetAttr("is_negative_input", !are_inputs_unsigned);
    auto quantize_op = g->CreateOpNode(&q_desc);  // OpDesc will be copied.

    // link quantize op
    UnlinkNodes(inputs[i], op);
    IR_NODE_LINK_TO(inputs[i], quantize_op);
    IR_NODE_LINK_TO(quantize_op, quantize_out_nodes[i]);
    IR_NODE_LINK_TO(quantize_out_nodes[i], op);
  }

  // update op's input
  op->Op()->SetInput(input_name, quantize_out_node_names);

  if (!scale_attr_name.empty()) op->Op()->SetAttr(scale_attr_name, scale);
  if (!shift_attr_name.empty()) op->Op()->SetAttr(shift_attr_name, shift);
}

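// The counterpart of QuantizeInput: inserts a dequantize op between `op` and
// `output`, optionally storing the scale as an attribute of `op`.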
void CPUQuantizePass::DequantizeOutput(Graph* g, Node* op, Node* output,
                                       std::string output_name,
                                       double scale_to_one, bool is_unsigned,
                                       std::string scale_attr_name) const {
  auto outputs = op->Op()->OutputNames();
  bool name_found =
      std::find(outputs.begin(), outputs.end(), output_name) != outputs.end();
  PADDLE_ENFORCE_EQ(name_found, true,
                    platform::errors::InvalidArgument(
                        "Var(%s) isn't the output of the %s operator.",
                        output_name, op->Op()->Type()));
  unsigned max = is_unsigned ? U8_MAX : S8_MAX;
  float scale = scale_to_one * max;

  // Create dequantize input variable
  VarDesc dequantize_in_desc(patterns::PDNodeName("dequantize", "in"));
  auto* dequantize_in_node = g->CreateVarNode(&dequantize_in_desc);

  // create a dequantize op node for output.
  OpDesc deq_desc;
  deq_desc.SetType("dequantize");
  deq_desc.SetInput("Input",
                    std::vector<std::string>({dequantize_in_node->Name()}));
  deq_desc.SetOutput("Output", std::vector<std::string>({output->Name()}));
  deq_desc.SetAttr("Scale", scale);
  auto dequantize_op = g->CreateOpNode(&deq_desc);  // OpDesc will be copied.

  // update op's output
  op->Op()->SetOutput(output_name,
                      std::vector<std::string>({dequantize_in_node->Name()}));

  // link dequantize op
  UnlinkNodes(op, output);
  IR_NODE_LINK_TO(op, dequantize_in_node);
  IR_NODE_LINK_TO(dequantize_in_node, dequantize_op);
  IR_NODE_LINK_TO(dequantize_op, output);

  if (!scale_attr_name.empty()) op->Op()->SetAttr(scale_attr_name, scale);
}

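// Helpers that look up quantization scales in the quant_var_scales pass
// attribute and log every variable whose scale is missing.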
bool CPUQuantizePass::AreScalesPresentForVarNames(
    std::vector<std::string> names) const {
  auto& scales = Get<VarQuantScale>("quant_var_scales");
  bool present = true;
  for (auto name : names) {
    if (scales.find(name) == scales.end()) {
      present = false;
      LogScaleIsMissingForVarName(name);
    }
  }
  return present;
}

bool CPUQuantizePass::AreScalesPresentForNodes(
    std::initializer_list<Node*> nodes) const {
  auto& scales = Get<VarQuantScale>("quant_var_scales");
  bool present = true;
  for (auto node : nodes) {
    if (scales.count(node->Name()) == 0) {
      present = false;
      LogScaleIsMissingForVarNode(node);
    }
  }
  return present;
}

std::pair<bool, LoDTensor> CPUQuantizePass::GetScaleDataByName(
    const std::string& name) const {
  auto& scales = Get<VarQuantScale>("quant_var_scales");
  return scales.at(name);
}

std::pair<bool, LoDTensor> CPUQuantizePass::GetScaleDataForNode(
    const Node* node) const {
  return GetScaleDataByName(node->Name());
}

LoDTensor CPUQuantizePass::GetScaleTensorByName(const std::string& name) const {
  return GetScaleDataByName(name).second;
}

LoDTensor CPUQuantizePass::GetScaleTensorForNode(const Node* node) const {
  return GetScaleDataForNode(node).second;
}

double CPUQuantizePass::GetScaleValueForNode(const Node* node,
                                             bool* is_unsigned) const {
  auto scale_data = GetScaleDataForNode(node);
  if (is_unsigned != nullptr) *is_unsigned = scale_data.first;
  return scale_data.second.data<double>()[0];
}

bool CPUQuantizePass::IsOpDequantized(const Node* node) const {
  return node->Op()->Type() == "dequantize" ||
         platform::HasOpINT8DataType(node->Op());
}

bool CPUQuantizePass::IsOpQuantized(const Node* node) const {
  return node->Op()->Type() == "quantize" ||
         platform::HasOpINT8DataType(node->Op());
}

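// Quantizes conv2d ops matched by the ConvResidual pattern: the input (and
// the optional residual connection) is quantized, filter scales are folded
// into the Scale_weights attribute, and the output is dequantized unless its
// scale is missing, in which case fp32 output is forced.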
void CPUQuantizePass::QuantizeConv(Graph* graph,
                                   bool with_residual_data) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::ConvResidual conv_pattern{pattern, name_scope_};
  conv_pattern(with_residual_data);

  int quantize_conv_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize conv2d op";
    GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(conv_op->Op())) {
      LogQuantizationDisabled(conv_op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern);

    auto has_output_scale = AreScalesPresentForNodes({conv_output});
    if (with_residual_data && !has_output_scale) {
      LogCannotQuantizeOp(conv_op,
                          "Conv op with ResidualData input cannot be quantized "
                          "without output scale.");
      return;
    }

    if (with_residual_data) {
      GET_IR_NODE_FROM_SUBGRAPH(conv_residual_data, conv_residual_data,
                                conv_pattern);
      if (!AreScalesPresentForNodes(
              {conv_input, conv_filter, conv_residual_data})) {
        LogCannotQuantizeOp(conv_op);
        return;
      }

      bool is_residual_unsigned{false};
      auto residual_scale =
          GetScaleValueForNode(conv_residual_data, &is_residual_unsigned);

      QuantizeInput(g, conv_op, conv_residual_data, "ResidualData",
                    residual_scale, is_residual_unsigned, "Scale_in_eltwise");
    } else {
      if (!AreScalesPresentForNodes({conv_input, conv_filter})) {
        LogCannotQuantizeOp(conv_op);
        return;
      }
    }

    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(conv_input, &is_input_unsigned);
    QuantizeInput(g, conv_op, conv_input, "Input", input_scale,
                  is_input_unsigned, "Scale_in");

    auto filter_scale_tensor = GetScaleTensorForNode(conv_filter);
    EigenVectorArrayMap eigen_tensor{filter_scale_tensor.data<double>(),
                                     filter_scale_tensor.numel()};
    eigen_tensor *= static_cast<double>(S8_MAX);
    std::vector<float> filter_scale{
        filter_scale_tensor.data<double>(),
        filter_scale_tensor.data<double>() + filter_scale_tensor.numel()};

    conv_op->Op()->SetAttr("Scale_weights", filter_scale);

    // if quantization scale is missing for output tensor, return fp32 data
    if (has_output_scale) {
      bool is_output_unsigned{false};
      auto output_scale =
          GetScaleValueForNode(conv_output, &is_output_unsigned);
      DequantizeOutput(g, conv_op, conv_output, "Output", output_scale,
                       is_output_unsigned, "Scale_out");
    } else {
      conv_op->Op()->SetAttr("force_fp32_output", true);
    }

    // change threshold in bounded ReLU: the int8 output is scaled by
    // Scale_out, so the fuse_alpha clipping threshold must be rescaled too
    if (conv_op->Op()->GetAttrIfExists<std::string>("fuse_activation") ==
        "relu6") {
      float scale_out =
          BOOST_GET_CONST(float, conv_op->Op()->GetAttr("Scale_out"));
      float threshold =
          BOOST_GET_CONST(float, conv_op->Op()->GetAttr("fuse_alpha"));
      conv_op->Op()->SetAttr("fuse_alpha", scale_out * threshold);
    }

    ++quantize_conv_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_conv_count);

  std::stringstream msg_ss;
  msg_ss << "---    quantized " << quantize_conv_count << " conv2d ops";
  if (with_residual_data) msg_ss << " with residual connection";
  PrettyLogDetail(msg_ss.str().c_str());
}

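// Quantizes fc ops: the input is quantized, weight scales are folded into
// the Scale_weights attribute, and the output is dequantized or, when its
// scale is missing, forced to fp32.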
void CPUQuantizePass::QuantizeFc(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::FCMKLDNN fc_pattern{pattern, name_scope_};
  auto* fc_input = gpd.mutable_pattern()
                       ->NewNode("fc_quantizer/input")
                       ->AsInput()
                       ->assert_is_op_input("fc", "Input");
  fc_pattern(fc_input, false);

  int quantize_fc_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize fc op";
    GET_IR_NODE_FROM_SUBGRAPH(fc, fc, fc_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(fc->Op())) {
      LogQuantizationDisabled(fc);
      return;
    }
    if (!fc->Op()->GetAttrIfExists<bool>("use_mkldnn")) {
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(weights, weights, fc_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(input, input, fc_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(output, output, fc_pattern);

    if (!AreScalesPresentForNodes({input, weights})) {
      LogCannotQuantizeOp(fc);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(input, &is_input_unsigned);
    QuantizeInput(g, fc, input, "Input", input_scale, is_input_unsigned,
                  "Scale_in");

    auto weight_scale_tensor = GetScaleTensorForNode(weights);
    EigenVectorArrayMap eigen_tensor{weight_scale_tensor.data<double>(),
                                     weight_scale_tensor.numel()};
    eigen_tensor *= static_cast<double>(S8_MAX);
    std::vector<float> filter_scale{
        weight_scale_tensor.data<double>(),
        weight_scale_tensor.data<double>() + weight_scale_tensor.numel()};

    fc->Op()->SetAttr("Scale_weights", filter_scale);

    // if quantization scale is missing for output tensor, return fp32 data
    if (AreScalesPresentForNodes({output})) {
      bool is_output_unsigned{false};
      auto output_scale = GetScaleValueForNode(output, &is_output_unsigned);
      DequantizeOutput(g, fc, output, "Out", output_scale, is_output_unsigned,
                       "Scale_out");
    } else {
      fc->Op()->SetAttr("force_fp32_output", true);
    }

    ++quantize_fc_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_fc_count);

  std::stringstream msg_ss;
  msg_ss << "---    quantized " << quantize_fc_count << " fc ops";
  PrettyLogDetail(msg_ss.str().c_str());
}

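// Quantizes pool2d ops; scales must be present for both the input and the
// output.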
void CPUQuantizePass::QuantizePool(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Pool pool_pattern{pattern, name_scope_};
  pool_pattern();

  int quantize_pool_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize pool2d op";
    GET_IR_NODE_FROM_SUBGRAPH(pool_op, pool_op, pool_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(pool_op->Op())) {
      LogQuantizationDisabled(pool_op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(pool_input, pool_input, pool_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(pool_output, pool_output, pool_pattern);

    if (!AreScalesPresentForNodes({pool_input, pool_output})) {
      LogCannotQuantizeOp(pool_op);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(pool_input, &is_input_unsigned);
    QuantizeInput(g, pool_op, pool_input, "X", input_scale, is_input_unsigned);

    bool is_output_unsigned{false};
    auto output_scale = GetScaleValueForNode(pool_output, &is_output_unsigned);
    DequantizeOutput(g, pool_op, pool_output, "Out", output_scale,
                     is_output_unsigned);

    ++quantize_pool_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_pool_count);

  PrettyLogDetail("---    quantized %d pool2d ops", quantize_pool_count);
}

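// Quantizes concat ops; every input is quantized with the output scale (see
// QuantizeInputs), so only the output scale has to be present.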
void CPUQuantizePass::QuantizeConcat(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Concat concat_pattern{pattern, name_scope_};
  concat_pattern();

  int quantize_concat_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize concat op";
    GET_IR_NODE_FROM_SUBGRAPH(concat_op, concat_op, concat_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(concat_op->Op())) {
      LogQuantizationDisabled(concat_op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(concat_out, concat_out, concat_pattern);

    if (!AreScalesPresentForNodes({concat_out})) {
      LogCannotQuantizeOp(concat_op);
      return;
    }

    // if all inputs were unsigned, then the output was set to unsigned
    // during the scale calculation step
    bool are_all_inputs_unsigned{false};
    auto output_scale =
        GetScaleValueForNode(concat_out, &are_all_inputs_unsigned);

    QuantizeInputs(g, concat_op, "X", are_all_inputs_unsigned);

    DequantizeOutput(g, concat_op, concat_out, "Out", output_scale,
                     are_all_inputs_unsigned);

    ++quantize_concat_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_concat_count);

  PrettyLogDetail("---    quantized %d concat ops", quantize_concat_count);
}

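// Quantizes the Input of prior_box ops; the outputs are left untouched.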
void CPUQuantizePass::QuantizePriorBox(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::PriorBox prior_box_pattern{pattern, name_scope_};
  prior_box_pattern();

  int quantize_prior_box_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize prior_box op";
    GET_IR_NODE_FROM_SUBGRAPH(prior_box_op, prior_box_op, prior_box_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(prior_box_op->Op())) {
      LogQuantizationDisabled(prior_box_op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(prior_box_input, prior_box_input,
                              prior_box_pattern);

    if (!AreScalesPresentForNodes({prior_box_input})) {
      LogCannotQuantizeOp(prior_box_op);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale =
        GetScaleValueForNode(prior_box_input, &is_input_unsigned);
    QuantizeInput(g, prior_box_op, prior_box_input, "Input", input_scale,
                  is_input_unsigned);

    ++quantize_prior_box_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_prior_box_count);

  PrettyLogDetail("---    quantized %d prior_box ops",
                  quantize_prior_box_count);
}

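// Quantizes transpose ops, but only when the preceding op is dequantized or
// the following op is quantized; otherwise int8 execution brings no benefit.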
void CPUQuantizePass::QuantizeTranspose(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Transpose transpose_pattern{pattern, name_scope_};
  transpose_pattern();

  int quantize_transpose_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize transpose op";
    GET_IR_NODE_FROM_SUBGRAPH(transpose_op, transpose_op, transpose_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(transpose_op->Op())) {
      LogQuantizationDisabled(transpose_op);
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(prev_op, prev_op, transpose_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(next_op, next_op, transpose_pattern);

    // skip if prev op and next op are not quantized
    if (!(IsOpDequantized(prev_op)) && !(IsOpQuantized(next_op))) {
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(transpose_in, transpose_in, transpose_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(transpose_out, transpose_out, transpose_pattern);

    if (!AreScalesPresentForNodes({transpose_in, transpose_out})) {
      LogCannotQuantizeOp(transpose_op);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(transpose_in, &is_input_unsigned);
    QuantizeInput(g, transpose_op, transpose_in, "X", input_scale,
                  is_input_unsigned);

    bool is_output_unsigned{false};
    auto output_scale =
        GetScaleValueForNode(transpose_out, &is_output_unsigned);
    DequantizeOutput(g, transpose_op, transpose_out, "Out", output_scale,
                     is_output_unsigned);

    ++quantize_transpose_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_transpose_count);

  PrettyLogDetail("---    quantized %d transpose ops",
                  quantize_transpose_count);
}

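// Quantizes reshape ops under the same neighbour conditions as transpose.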
void CPUQuantizePass::QuantizeReshape(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Reshape reshape_pattern{pattern, name_scope_};
  reshape_pattern();

  int quantize_reshape_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize reshape op";
    GET_IR_NODE_FROM_SUBGRAPH(reshape_op, reshape_op, reshape_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(reshape_op->Op())) {
      LogQuantizationDisabled(reshape_op);
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(prev_op, prev_op, reshape_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(next_op, next_op, reshape_pattern);

656 657
    // skip if prev op and next op is not quantized
    if (!(IsOpDequantized(prev_op)) && !(IsOpQuantized(next_op))) {
658 659 660 661 662 663
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(reshape_in, reshape_in, reshape_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(reshape_out, reshape_out, reshape_pattern);

    if (!AreScalesPresentForNodes({reshape_in, reshape_out})) {
      LogCannotQuantizeOp(reshape_op);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(reshape_in, &is_input_unsigned);
    QuantizeInput(g, reshape_op, reshape_in, "X", input_scale,
                  is_input_unsigned);

    bool is_output_unsigned{false};
    auto output_scale = GetScaleValueForNode(reshape_out, &is_output_unsigned);
    DequantizeOutput(g, reshape_op, reshape_out, "Out", output_scale,
                     is_output_unsigned);

    ++quantize_reshape_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_reshape_count);

  PrettyLogDetail("---    quantized %d reshape ops", quantize_reshape_count);
}

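// Quantizes slice ops under the same neighbour conditions as transpose and
// reshape.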
void CPUQuantizePass::QuantizeSlice(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Slice slice_pattern{pattern, name_scope_};
  slice_pattern();

  int quantize_slice_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize slice op";
    GET_IR_NODE_FROM_SUBGRAPH(slice_op, slice_op, slice_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(slice_op->Op())) {
      LogQuantizationDisabled(slice_op);
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(prev_op, prev_op, slice_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(next_op, next_op, slice_pattern);

    // skip if prev op and next op are not quantized
    if (!IsOpDequantized(prev_op) && !IsOpQuantized(next_op)) {
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(slice_in, slice_in, slice_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(slice_out, slice_out, slice_pattern);

    if (!AreScalesPresentForNodes({slice_out})) {
      LogCannotQuantizeOp(slice_op);
      return;
    }

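    // slice passes data through unchanged, so the single known scale (taken
    // from the output) is used both for quantizing the input and for
    // dequantizing the output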
    bool is_input_unsigned{false};
    auto input_scale = GetScaleValueForNode(slice_out, &is_input_unsigned);
    QuantizeInput(g, slice_op, slice_in, "Input", input_scale,
                  is_input_unsigned);

    bool is_output_unsigned{false};
    auto output_scale = GetScaleValueForNode(slice_out, &is_output_unsigned);
    DequantizeOutput(g, slice_op, slice_out, "Out", output_scale,
                     is_output_unsigned);

    ++quantize_slice_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_slice_count);

  PrettyLogDetail("---    quantized %d slice ops", quantize_slice_count);
}

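// Quantizes matmul ops whose X and Y inputs both come from dequantized ops;
// the two inputs must share the same signedness.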
void CPUQuantizePass::QuantizeMatmul(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::MatmulWithInputOps matmul_pattern{pattern, name_scope_};
  matmul_pattern();

  int quantize_matmul_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize matmul op";
    GET_IR_NODE_FROM_SUBGRAPH(matmul_op, matmul_op, matmul_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(matmul_op->Op())) {
      LogQuantizationDisabled(matmul_op);
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(prev_op_x, prev_op_x, matmul_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(prev_op_y, prev_op_y, matmul_pattern);

    // skip if prev ops are not quantized
    if (!IsOpDequantized(prev_op_x) || !IsOpDequantized(prev_op_y)) {
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(matmul_in_x, matmul_in_x, matmul_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(matmul_in_y, matmul_in_y, matmul_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(matmul_out, matmul_out, matmul_pattern);

    if (!AreScalesPresentForNodes({matmul_in_x, matmul_in_y})) {
      LogCannotQuantizeOp(matmul_op);
      return;
    }

    bool is_x_unsigned{false}, is_y_unsigned{false};
    auto input_x_scale = GetScaleValueForNode(matmul_in_x, &is_x_unsigned);
    auto input_y_scale = GetScaleValueForNode(matmul_in_y, &is_y_unsigned);
    PADDLE_ENFORCE_EQ(is_x_unsigned, is_y_unsigned,
                      platform::errors::InvalidArgument(
                          "Matmul inputs should have the same "
                          "attribute of signed/unsigned, but they "
                          "are different: x(%d), y(%d).",
                          is_x_unsigned, is_y_unsigned));
    QuantizeInput(g, matmul_op, matmul_in_x, "X", input_x_scale, is_x_unsigned,
                  "Scale_x");
    QuantizeInput(g, matmul_op, matmul_in_y, "Y", input_y_scale, is_y_unsigned,
                  "Scale_y");

    // if quantization scale is missing for output tensor, return fp32 data
    if (AreScalesPresentForNodes({matmul_out})) {
      bool is_output_unsigned{false};
      auto output_scale = GetScaleValueForNode(matmul_out, &is_output_unsigned);
      DequantizeOutput(g, matmul_op, matmul_out, "Out", output_scale,
                       is_output_unsigned, "Scale_out");
    } else {
      matmul_op->Op()->SetAttr("force_fp32_output", true);
    }

    ++quantize_matmul_count;
  };
  gpd(graph, handler);
  AddStatis(quantize_matmul_count);

  PrettyLogDetail("---    quantized %d matmul ops", quantize_matmul_count);
}

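// Quantizes elementwise_add ops; scales must be present for both inputs and
// the output, and the inputs must share the same signedness.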
void CPUQuantizePass::QuantizeElementwiseAdd(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope_};

  elementwise_add_pattern(
      pattern->NewNode(elementwise_add_pattern.elementwise_add_x_repr()),
      pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr()));

  int quantize_elementwise_add_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize elementwise_add op";
    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
                              elementwise_add_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(elementwise_add_op->Op())) {
      LogQuantizationDisabled(elementwise_add_op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x,
                              elementwise_add_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y,
                              elementwise_add_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
                              elementwise_add_pattern);

    if (!AreScalesPresentForNodes(
            {elementwise_add_x, elementwise_add_y, elementwise_add_out})) {
      LogCannotQuantizeOp(elementwise_add_op);
      return;
    }

    bool is_x_unsigned{false}, is_y_unsigned{false};
    auto input_x_scale =
        GetScaleValueForNode(elementwise_add_x, &is_x_unsigned);
    auto input_y_scale =
        GetScaleValueForNode(elementwise_add_y, &is_y_unsigned);

    // TODO(sfraczek): add support for different signness
    if (is_x_unsigned != is_y_unsigned) {
      LogCannotQuantizeOp(elementwise_add_op,
                          "ElementwiseAdd inputs must be of the same type.");
      return;
    }

    QuantizeInput(g, elementwise_add_op, elementwise_add_x, "X", input_x_scale,
                  is_x_unsigned, "Scale_x");
    QuantizeInput(g, elementwise_add_op, elementwise_add_y, "Y", input_y_scale,
                  is_y_unsigned, "Scale_y");

    bool is_output_unsigned{false};
    auto output_scale =
        GetScaleValueForNode(elementwise_add_out, &is_output_unsigned);

    DequantizeOutput(g, elementwise_add_op, elementwise_add_out, "Out",
                     output_scale, is_output_unsigned, "Scale_out");

    ++quantize_elementwise_add_count;
  };
  gpd(graph, handler);
  AddStatis(quantize_elementwise_add_count);

  PrettyLogDetail("---    quantized %d elementwise_add ops",
                  quantize_elementwise_add_count);
}

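// Quantizes fusion_gru ops: X is quantized (with a shift for signed data),
// WeightX scales are folded into the Scale_weights attribute, and the output
// stays in fp32.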
void CPUQuantizePass::QuantizeFusionGru(Graph* graph) const {
  GraphPatternDetector gpd;
  patterns::FusionGru pattern{gpd.mutable_pattern(), name_scope_};
  pattern();

  int quantize_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize fusion_gru op";
    GET_IR_NODE_FROM_SUBGRAPH(op, op, pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(op->Op())) {
      LogQuantizationDisabled(op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(x, x, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(weight_h, weight_h, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(weight_x, weight_x, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(out, out, pattern);

    if (!AreScalesPresentForNodes({x, weight_x})) {
      LogCannotQuantizeOp(op);
      return;
    }

    bool is_x_unsigned{false};
    auto input_x_scale = GetScaleValueForNode(x, &is_x_unsigned);

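    // a shift of 128 maps signed int8 input onto the unsigned range; input
    // that is already unsigned needs no shift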
    double input_x_shift{128.};
    if (is_x_unsigned) input_x_shift = 0.;

    QuantizeInput(g, op, x, "X", input_x_scale, is_x_unsigned, "Scale_data",
                  input_x_shift, "Shift_data");

    auto weight_scale_tensor = GetScaleTensorForNode(weight_x);
    EigenVectorArrayMap eigen_tensor{weight_scale_tensor.data<double>(),
                                     weight_scale_tensor.numel()};
    eigen_tensor *= static_cast<double>(S8_MAX);
    std::vector<float> scale_weights{
        weight_scale_tensor.data<double>(),
        weight_scale_tensor.data<double>() + weight_scale_tensor.numel()};

    op->Op()->SetAttr("Scale_weights", scale_weights);
    // return fp32 data
    op->Op()->SetAttr("force_fp32_output", true);

    ++quantize_count;
  };
  gpd(graph, handler);
  AddStatis(quantize_count);

  PrettyLogDetail("---    quantized %d fusion_gru ops", quantize_count);
}

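// Quantizes multi_gru ops like fusion_gru, except that the per-layer WeightX
// scales are passed in as persistable Scale_weights tensors instead of an
// attribute.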
void CPUQuantizePass::QuantizeMultiGru(Graph* graph) const {
  GraphPatternDetector gpd;
  patterns::MultiGru pattern{gpd.mutable_pattern(), name_scope_};
  pattern();

  int quantize_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize multi_gru op";
    GET_IR_NODE_FROM_SUBGRAPH(gru, gru, pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(gru->Op())) {
      LogQuantizationDisabled(gru);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(x, x, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(wx, wx, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(h, h, pattern);

    auto wx_names = gru->Op()->Input("WeightX");
    if (!AreScalesPresentForNodes({x}) ||
        !AreScalesPresentForVarNames(wx_names)) {
      LogCannotQuantizeOp(gru);
      return;
    }

    bool is_x_unsigned{false};
    auto input_x_scale = GetScaleValueForNode(x, &is_x_unsigned);

    double input_x_shift{128.};
    if (is_x_unsigned) input_x_shift = 0.;

    QuantizeInput(g, gru, x, "X", input_x_scale, is_x_unsigned, "Scale_data",
                  input_x_shift, "Shift_data");

    auto* scope = param_scope();
    int wx_size = wx_names.size();
    std::vector<std::string> w_scale_var_names;
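    // for each WeightX input, materialize its scales (multiplied by S8_MAX)
    // as a persistable fp32 tensor and wire it to the op as a Scale_weights
    // input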
    for (int i = 0; i < wx_size; ++i) {
      auto scale_tensor_src = GetScaleTensorByName(wx_names[i]);
      EigenVectorArrayMap eigen_tensor_src{scale_tensor_src.data<double>(),
                                           scale_tensor_src.numel()};

      VarDesc scale_var_desc(patterns::PDNodeName("multi_gru", "w_scale"));

      scale_var_desc.SetShape(framework::vectorize(scale_tensor_src.dims()));
      scale_var_desc.SetDataType(proto::VarType::FP32);
      scale_var_desc.SetLoDLevel(scale_tensor_src.lod().size());
      scale_var_desc.SetPersistable(true);
      auto* w_scale_node = g->CreateVarNode(&scale_var_desc);

      auto* w_scale_tensor_dst =
          scope->Var(w_scale_node->Name())->GetMutable<LoDTensor>();
      w_scale_tensor_dst->Resize(scale_tensor_src.dims());
      auto* dst_data =
          w_scale_tensor_dst->mutable_data<float>(platform::CPUPlace());
      EigenVectorArrayMapFloat eigen_tensor_dst{dst_data,
                                                w_scale_tensor_dst->numel()};
      eigen_tensor_dst =
          eigen_tensor_src.cast<float>() * static_cast<float>(S8_MAX);
      w_scale_var_names.push_back(w_scale_node->Name());
      IR_NODE_LINK_TO(w_scale_node, gru);
    }

    gru->Op()->SetInput("Scale_weights", w_scale_var_names);
    // return fp32 data
    gru->Op()->SetAttr("force_fp32_output", true);

    ++quantize_count;
  };
  gpd(graph, handler);
  AddStatis(quantize_count);

  PrettyLogDetail("---    quantized %d multi_gru ops", quantize_count);
}

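// Quantizes fusion_lstm ops following the same scheme as fusion_gru.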
void CPUQuantizePass::QuantizeFusionLSTM(Graph* graph) const {
  GraphPatternDetector gpd;
  patterns::FusionLSTM pattern{gpd.mutable_pattern(), name_scope_};
  pattern();

  int quantize_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize fusion_lstm op";
    GET_IR_NODE_FROM_SUBGRAPH(op, op, pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(op->Op())) {
      LogQuantizationDisabled(op);
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(x, x, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(weight_h, weight_h, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(weight_x, weight_x, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(hidden, hidden, pattern);
    GET_IR_NODE_FROM_SUBGRAPH(cell, cell, pattern);

    if (!AreScalesPresentForNodes({x, weight_x})) {
      LogCannotQuantizeOp(op);
      return;
    }

    bool is_x_unsigned{false};
    auto input_x_scale = GetScaleValueForNode(x, &is_x_unsigned);

    double input_x_shift{128.};
    if (is_x_unsigned) input_x_shift = 0.;

    QuantizeInput(g, op, x, "X", input_x_scale, is_x_unsigned, "Scale_data",
                  input_x_shift, "Shift_data");

    auto weight_scale_tensor = GetScaleTensorForNode(weight_x);
    EigenVectorArrayMap eigen_tensor{weight_scale_tensor.data<double>(),
                                     weight_scale_tensor.numel()};
    eigen_tensor *= static_cast<double>(S8_MAX);
    std::vector<float> scale_weights{
        weight_scale_tensor.data<double>(),
        weight_scale_tensor.data<double>() + weight_scale_tensor.numel()};

    op->Op()->SetAttr("Scale_weights", scale_weights);
    // return fp32 data
    op->Op()->SetAttr("force_fp32_output", true);

    ++quantize_count;
  };
  gpd(graph, handler);
  AddStatis(quantize_count);

  PrettyLogDetail("---    quantized %d fusion_lstm ops", quantize_count);
}

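// Quantizes nearest_interp ops under the same neighbour conditions as
// transpose and reshape.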
void CPUQuantizePass::QuantizeNearestInterp(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::NearestInterp nearest_interp_pattern{pattern, name_scope_};
  nearest_interp_pattern();

  int quantize_nearest_interp_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize nearest_interp op";
    GET_IR_NODE_FROM_SUBGRAPH(nearest_interp_op, nearest_interp_op,
                              nearest_interp_pattern);

    // skip if should not be quantized
    if (!platform::HasOpINT8DataType(nearest_interp_op->Op())) {
      LogQuantizationDisabled(nearest_interp_op);
      return;
    }
    GET_IR_NODE_FROM_SUBGRAPH(prev_op, prev_op, nearest_interp_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(next_op, next_op, nearest_interp_pattern);

    // skip if prev op and next op are not quantized
    if (!(IsOpDequantized(prev_op)) && !(IsOpQuantized(next_op))) {
      LogCannotQuantizeOp(nearest_interp_op,
                          "There are no other quantized operators nearby, so "
                          "quantization is not recommended.");
      return;
    }

    GET_IR_NODE_FROM_SUBGRAPH(nearest_interp_in, nearest_interp_in,
                              nearest_interp_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(nearest_interp_out, nearest_interp_out,
                              nearest_interp_pattern);

    if (!AreScalesPresentForNodes({nearest_interp_in, nearest_interp_out})) {
      LogCannotQuantizeOp(nearest_interp_op);
      return;
    }

    bool is_input_unsigned{false};
    auto input_scale =
        GetScaleValueForNode(nearest_interp_in, &is_input_unsigned);
    QuantizeInput(g, nearest_interp_op, nearest_interp_in, "X", input_scale,
                  is_input_unsigned);

    bool is_output_unsigned{false};
    auto output_scale =
        GetScaleValueForNode(nearest_interp_out, &is_output_unsigned);
    DequantizeOutput(g, nearest_interp_op, nearest_interp_out, "Out",
                     output_scale, is_output_unsigned);

    ++quantize_nearest_interp_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_nearest_interp_count);

  PrettyLogDetail("---    quantized %d nearest_interp ops",
                  quantize_nearest_interp_count);
}

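// Pass entry point: validates the graph and parameter scope, then runs every
// op-specific quantization routine.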
void CPUQuantizePass::ApplyImpl(ir::Graph* graph) const {
  VLOG(3) << "Quantizing the graph.";
  PADDLE_ENFORCE_NOT_NULL(
      graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
  FusePassBase::Init(name_scope_, graph);

  PADDLE_ENFORCE_NOT_NULL(param_scope(), platform::errors::InvalidArgument(
                                             "Scope cannot be nullptr."));

  QuantizeConv(graph, false /* with_residual_data */);
  QuantizeConv(graph, true /* with_residual_data */);
  QuantizePool(graph);
  QuantizeConcat(graph);
  QuantizePriorBox(graph);
  QuantizeTranspose(graph);
  QuantizeFc(graph);
  QuantizeReshape(graph);
  QuantizeMatmul(graph);
  QuantizeElementwiseAdd(graph);
  QuantizeFusionGru(graph);
  QuantizeMultiGru(graph);
  QuantizeFusionLSTM(graph);
  QuantizeSlice(graph);
  QuantizeNearestInterp(graph);
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(cpu_quantize_pass, paddle::framework::ir::CPUQuantizePass)
    .RequirePassAttr("quant_var_scales");