未验证 提交 751b36c5 编写于 作者: D deqiangc 提交者: GitHub

Add a new test case for conv operator (#329) (#377)

* Add a new test case for conv operator (#329)

This new test case is based on issue #329 and increases
coverage of the optimized kernel's data precision for the conv operator.
In this test, input, output and filter are all 8 bits and filter
tensor is of dimension 8x3x3x3 with different scales per output channel.

TESTED= local test with x86 and HiFi4.

* Move the large test data variables into their own folder
and use one flat top-level namespace instead of nested namespaces.

BUG=195779890
上级 44353712
......@@ -222,8 +222,8 @@ cc_library(
"//tensorflow/lite/kernels/internal:reference_base",
"//tensorflow/lite/kernels/internal:tensor",
"//tensorflow/lite/kernels/internal:types",
"//tensorflow/lite/micro:memory_helpers",
"//tensorflow/lite/micro:flatbuffer_utils",
"//tensorflow/lite/micro:memory_helpers",
"//tensorflow/lite/micro:micro_graph",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/schema:schema_fbs",
......@@ -387,6 +387,7 @@ cc_test(
],
deps = [
":conv_test_common",
"//tensorflow/lite/micro/kernels/testdata:conv_test_data",
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_utils",
......
......@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/kernels/testdata/conv_test_data.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
......@@ -791,4 +792,115 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
kQuantizationTolerance));
}
// Regression test derived from
// https://github.com/tensorflow/tflite-micro/issues/329.
// Input, output and filter are all 8 bits. The filter tensor has dimensions
// 8x3x3x3 and carries a different scale per output channel. Several of the
// arbitrary parameter values below come directly from the issue above.
TF_LITE_MICRO_TEST(Int8Filter8x3x3x3PerChannelScaleRelu6ShouldMatchGolden) {
  using tflite::ElementCount;
  using tflite::kConvBiasQuantized8;
  using tflite::kConvFilter8x3x3x3;
  using tflite::kConvGoldenOutput1x16x16x8;
  using tflite::kConvInput1x32x32x3;
  using tflite::testing::CreateTensor;
  using tflite::testing::FloatArrayFromFloats;
  using tflite::testing::IntArrayFromInts;
  using tflite::testing::ValidateConvGoldens;

  constexpr int kInputDepth = 3;
  constexpr int kOutputDepth = 8;

  // Input quantization parameters: every input element shares one scale and
  // one zero point.
  constexpr float kInputScale = 0.00784313772f;
  constexpr int kInputZeroPoint = -1;
  float input_scales[] = {1, kInputScale};
  int input_zero_points[] = {1, kInputZeroPoint};
  TfLiteAffineQuantization input_quantization;
  input_quantization.scale = FloatArrayFromFloats(input_scales);
  input_quantization.zero_point = IntArrayFromInts(input_zero_points);
  input_quantization.quantized_dimension = 0;

  // Input tensor of shape 1x32x32x3.
  int input_dims_data[] = {4, 1, 32, 32, kInputDepth};
  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteTensor input_tensor = CreateTensor(kConvInput1x32x32x3, input_dims);
  input_tensor.params = {kInputScale, kInputZeroPoint};
  input_tensor.quantization = {kTfLiteAffineQuantization, &input_quantization};

  // Filter quantization parameters: a single zero point, but a distinct scale
  // per output channel (quantized_dimension 0).
  int filter_zero_points[kOutputDepth + 1] = {kOutputDepth, 0, 0, 0, 0,
                                              0,            0, 0, 0};
  float filter_scales[kOutputDepth + 1] = {
      kOutputDepth,   2.18926089e-05, 0.00453596329,
      0.000504297379, 0.00184638216,  0.00596635276,
      0.000199135626, 0.0047677448,   0.00193942268};
  TfLiteAffineQuantization filter_quantization = {
      FloatArrayFromFloats(filter_scales),
      IntArrayFromInts(filter_zero_points), 0};

  // Filter tensor of shape 8x3x3x3.
  int filter_dims_data[] = {4, kOutputDepth, 3, 3, kInputDepth};
  TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data);
  TfLiteTensor filter_tensor = CreateTensor(kConvFilter8x3x3x3, filter_dims);
  filter_tensor.quantization = {kTfLiteAffineQuantization,
                                &filter_quantization};

  // Bias quantization parameters: a single zero point, but a distinct scale
  // per output channel.
  int bias_zero_points[kOutputDepth + 1] = {kOutputDepth, 0, 0, 0, 0,
                                            0,            0, 0, 0};
  float bias_scales[kOutputDepth + 1] = {
      kOutputDepth,   1.71706745e-07, 3.5576184e-05,
      3.95527377e-06, 1.44814294e-05, 4.67949249e-05,
      1.56184819e-06, 3.73940784e-05, 1.52111588e-05};
  TfLiteAffineQuantization bias_quantization = {
      FloatArrayFromFloats(bias_scales), IntArrayFromInts(bias_zero_points),
      0};

  // One bias value per output channel (8 total).
  int bias_dims_data[] = {1, kOutputDepth};
  TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data);
  TfLiteTensor bias_tensor = CreateTensor(kConvBiasQuantized8, bias_dims);
  bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quantization};

  // Output quantization parameters: every output element shares one scale and
  // one zero point.
  constexpr float kOutputScale = 0.0235294122f;
  constexpr int kOutputZeroPoint = -128;
  float output_scales[] = {1, kOutputScale};
  int output_zero_points[] = {1, kOutputZeroPoint};
  TfLiteAffineQuantization output_quantization;
  output_quantization.scale = FloatArrayFromFloats(output_scales);
  output_quantization.zero_point = IntArrayFromInts(output_zero_points);
  output_quantization.quantized_dimension = 0;

  // Output tensor of shape 1x16x16x8.
  int8_t output_data[1 * 16 * 16 * kOutputDepth];
  int output_dims_data[] = {4, 1, 16, 16, kOutputDepth};
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_dims_count = ElementCount(*output_dims);
  TfLiteTensor output_tensor = CreateTensor(output_data, output_dims);
  output_tensor.params = {kOutputScale, kOutputZeroPoint};
  output_tensor.quantization = {kTfLiteAffineQuantization,
                                &output_quantization};

  // Three inputs (input, filter, bias) followed by one output.
  constexpr int inputs_size = 3;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      input_tensor,
      filter_tensor,
      bias_tensor,
      output_tensor,
  };

  // Start from the shared conv parameters but clamp with ReLU6.
  TfLiteConvParams conv_params = tflite::testing::common_conv_params;
  conv_params.activation = kTfLiteActRelu6;

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      ValidateConvGoldens(tensors, tensors_size, kConvGoldenOutput1x16x16x8,
                          output_dims_count, &conv_params,
                          tflite::Register_CONV_2D(), output_data,
                          1.0 /* tolerance */));
}
TF_LITE_MICRO_TESTS_END
load("//tensorflow/lite/micro:build_def.bzl", "micro_copts")
load("//tensorflow:extra_rules.bzl", "tflm_kernel_friends")
# NOTE(review): micro_copts and tflm_kernel_friends are loaded above but not
# referenced by any rule visible here -- confirm they are used elsewhere in
# this file before removing the loads.
package(
    features = ["-layering_check"],
    licenses = ["notice"],
    # Only the kernels package (the conv tests) may depend on this test data.
    default_visibility = ["//tensorflow/lite/micro/kernels:__pkg__"],
)
####################################
# C++ libraries
####################################
# Large quantized test vectors (input, filter, bias, golden output) for the
# conv kernel test, kept in their own package per the commit description.
cc_library(
    name = "conv_test_data",
    srcs = ["conv_test_data.cc"],
    hdrs = ["conv_test_data.h"],
    deps = ["//tensorflow/lite/c:common"],
)
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Include guard updated to match the file's actual location,
// tensorflow/lite/micro/kernels/testdata/conv_test_data.h (the original guard
// omitted TESTDATA and could collide with a header of the same name in the
// parent kernels/ directory).
#ifndef TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_CONV_TEST_DATA_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_CONV_TEST_DATA_H_

#include "tensorflow/lite/c/common.h"

namespace tflite {
// Quantized test vectors for the conv kernel test; the dimension suffixes in
// the names describe the tensor shapes. Definitions live in conv_test_data.cc.
extern const int8_t kConvInput1x32x32x3[];
extern const int8_t kConvFilter8x3x3x3[];
extern const int32_t kConvBiasQuantized8[];
extern const int8_t kConvGoldenOutput1x16x16x8[];
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_TESTDATA_CONV_TEST_DATA_H_
......@@ -794,9 +794,11 @@ ifneq ($(findstring $(EXPLICITLY_SPECIFIED_TEST),$(MICROLITE_TEST_SRCS)),)
MICROLITE_TEST_SRCS := $(filter-out $(EXPLICITLY_SPECIFIED_TEST), $(MICROLITE_TEST_SRCS))
EXPLICITLY_SPECIFIED_TEST_SRCS := \
$(EXPLICITLY_SPECIFIED_TEST) \
tensorflow/lite/micro/kernels/conv_test_common.cc
tensorflow/lite/micro/kernels/conv_test_common.cc \
tensorflow/lite/micro/kernels/testdata/conv_test_data.cc
EXPLICITLY_SPECIFIED_TEST_HDRS := \
tensorflow/lite/micro/kernels/conv_test.h
tensorflow/lite/micro/kernels/conv_test.h \
tensorflow/lite/micro/kernels/testdata/conv_test_data.h
$(eval $(call microlite_test,kernel_conv_test,\
$(EXPLICITLY_SPECIFIED_TEST_SRCS),$(EXPLICITLY_SPECIFIED_TEST_HDRS)))
endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册