Commit 431b12d9 authored by Nat Jeffries, committed by TensorFlower Gardener

Add int16->int32 quantization to TFLM quantize op.

PiperOrigin-RevId: 339953940
Change-Id: I568589fc09fa6e5aae137e7e8533e1dab86190d3
Parent 8e229bb6
......@@ -67,7 +67,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
input->type == kTfLiteInt8);
TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16);
output->type == kTfLiteInt16 ||
output->type == kTfLiteInt32);
if (((input->type == kTfLiteInt16 || input->type == kTfLiteInt8) &&
output->type == kTfLiteInt8) ||
......@@ -139,6 +140,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
data->quantization_params.zero_point,
tflite::micro::GetTensorData<int16_t>(output));
return kTfLiteOk;
case kTfLiteInt32:
reference_ops::Requantize(
tflite::micro::GetTensorData<int16_t>(input), size,
data->output_multiplier, data->output_shift, data->input_zero_point,
data->quantization_params.zero_point,
tflite::micro::GetTensorData<int32_t>(output));
return kTfLiteOk;
default:
TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.",
TfLiteTypeGetName(input->type),
......
......@@ -282,4 +282,21 @@ TF_LITE_MICRO_TEST(QuantizeOpTestInt8toInt8NoZeroPoint) {
output_zero_point, output_quantized);
}
TF_LITE_MICRO_TEST(QuantizeOpTestInt16toInt32) {
  // Requantizes an int16 tensor (scale 1.0, zero point 0) into an int32
  // tensor (scale 0.5, zero point 0). Input and golden float values are the
  // same array; TestRequantize quantizes them with the respective scales.
  constexpr int kLength = 10;
  const int kDims[] = {2, 2, 5};
  const float kValues[] = {-32, -31, -30, -29, -28, 27, 28, 29, 30, 31};
  const float kInputScale = 1.f;
  const int kInputZeroPoint = 0;
  const float kOutputScale = 0.5;
  const int kOutputZeroPoint = 0;
  // Scratch buffers: quantized input, quantized golden, and kernel output.
  int16_t input_quantized[kLength];
  int32_t golden_quantized[kLength];
  int32_t output_quantized[kLength];
  tflite::testing::TestRequantize(kDims, kValues, input_quantized, kInputScale,
                                  kInputZeroPoint, kDims, kValues,
                                  golden_quantized, kOutputScale,
                                  kOutputZeroPoint, output_quantized);
}
TF_LITE_MICRO_TESTS_END
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment