/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *     Unless required by applicable law or agreed to in writing, software
 *     distributed under the License is distributed on an "AS IS" BASIS,
 *     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *     See the License for the specific language governing permissions and
 *     limitations under the License. */

#include "paddle/fluid/operators/quantize_op.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

// Selects the kernel for the quantize op. The op only provides an MKL-DNN
// (oneDNN) kernel, so the library type and memory layout are pinned to
// kMKLDNN; the element data type is deduced from the "Input" variable.
framework::OpKernelType QuantOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_ = framework::LibraryType::kMKLDNN;
  framework::DataLayout layout_ = framework::DataLayout::kMKLDNN;

  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"), ctx.GetPlace(),
      layout_, library_);
}

void QuantOpMaker::Make() {
34 35
  AddInput("Input", "Input data");
  AddOutput("Output", "Output data");
X
xiaoli.liu@intel.com 已提交
36 37 38
  AddAttr<bool>("is_negative_input",
                "(bool, default false) Only used in mkldnn INT8 kernel")
      .SetDefault(false);
39 40 41 42 43
  AddAttr<float>("Scale", "Scale data").SetDefault({1.0f});
  AddAttr<float>(
      "Shift",
      "Shift data. When Shift is non-zero, data is quantized to unsigned int8.")
      .SetDefault({0.0f});
44 45 46
  AddAttr<std::string>("output_format",
                       "Convert format to NHWC or NCHW during quantization.")
      .SetDefault("NHWC");
47 48
  AddAttr<bool>("bfloat16", "(bool, default false) Convert to bfloat16")
      .SetDefault(false);
X
xiaoli.liu@intel.com 已提交
49 50 51 52 53 54 55
  AddComment(R"DOC(This op will quantize data from FP32 to INT8)DOC");
}

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;

56
REGISTER_OPERATOR(quantize, ops::QuantOp, ops::QuantOpMaker);