Unverified commit d0cdc6a8 authored by Trinity Lundgren, committed by GitHub

Add conv kernel unit tests (#1094)

* Add conv kernel test with 3x3 filter, 2x2 stride, and 4x4 EVEN size
  input (padding 'same').
* Add conv kernel test with 3x3 filter, 2x2 stride, and 5x5 ODD size
  input (padding 'same'); the expected output sizes for both cases are
  worked through in the sketch after this list.
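
Both configurations use 'same' padding with a 2x2 stride, so the expected
output spatial size is ceil(input / stride): 2x2 for the 4x4 input and 3x3
for the 5x5 input. A minimal sketch of that calculation follows
(SamePaddingOutputSize is a hypothetical helper, not part of the commit):

#include <cstdio>

// Hypothetical helper (not part of the commit): with 'same' padding the
// output spatial size is ceil(input / stride), independent of filter size.
static int SamePaddingOutputSize(int input, int stride) {
  return (input + stride - 1) / stride;
}

int main() {
  // 4x4 input, stride 2 -> 2x2 output (matches the 1x2x2x1 golden data).
  std::printf("4x4 -> %dx%d\n", SamePaddingOutputSize(4, 2),
              SamePaddingOutputSize(4, 2));
  // 5x5 input, stride 2 -> 3x3 output (matches the 1x3x3x1 golden data).
  std::printf("5x5 -> %dx%d\n", SamePaddingOutputSize(5, 2),
              SamePaddingOutputSize(5, 2));
  return 0;
}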

Both tests pass for the TFLM reference kernel for conv when run with:

$ make -f tensorflow/lite/micro/tools/make/Makefile \
  test_kernel_conv_test

When run for the xtensa kernel with:

$ make -f tensorflow/lite/micro/tools/make/Makefile TARGET=xtensa \
  TARGET_ARCH=vision_p6 OPTIMIZED_KERNEL_DIR=xtensa \
  XTENSA_CORE=${VP6_CORE} test_kernel_conv_test

The 4x4 even-size input test fails and the 5x5 odd-size input test passes.

Note that the 4x4 test is currently excluded from the Xtensa build by a
preprocessor guard (#if !defined(XTENSA)). The test must be moved outside
that guard to reproduce the failure; a simplified sketch of the layout
follows.
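
For reference, the assumed guard layout in conv_test.cc is sketched below
(only the closing #endif is visible in the diff; the test bodies are
abbreviated):

#if !defined(XTENSA)
// ... tests in this block are compiled only for non-Xtensa builds ...
TF_LITE_MICRO_TEST(Int8Filter1x3x3x1ShouldMatchGoldenEvenInputPaddingSame) {
  // ... even-input test body (see diff below) ...
}
#endif  // !defined(XTENSA)

// Tests placed after the guard run on all targets, including Xtensa:
TF_LITE_MICRO_TEST(Int8Filter1x3x3x1ShouldMatchGoldenOddInputPaddingSame) {
  // ... odd-input test body (see diff below) ...
}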

BUG=b/228102789
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Parent 1c2e49b1
@@ -637,8 +637,210 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::Register_CONV_2D(), output_data));
}
// This test does not pass when run as part of the Xtensa conv test suite.
// Move this test below the '#endif // !defined(XTENSA)' guard to reproduce.
TF_LITE_MICRO_TEST(Int8Filter1x3x3x1ShouldMatchGoldenEvenInputPaddingSame) {
using tflite::ElementCount;
using tflite::kConvFilter1x3x3x1;
using tflite::kConvGoldenOutput4x4InputPaddingSame2x2;
using tflite::kConvInput1x4x4x1;
using tflite::kConvZeroBias;
using tflite::testing::CreateTensor;
using tflite::testing::FloatArrayFromFloats;
using tflite::testing::IntArrayFromInts;
using tflite::testing::ValidateConvGoldens;
constexpr int kInDepth = 1;
constexpr int kOutDepth = 1;
// Input quantization parameters: same scale and zero point for all input
// elements.
constexpr float kInputScale = 0.00392120517f;
constexpr int kInputZeroPoint = -128;
float input_scales[] = {1, kInputScale};
int input_zero_points[] = {1, kInputZeroPoint};
TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales),
IntArrayFromInts(input_zero_points),
0};
// Create input tensor of size 1x4x4x1.
int input_shape[] = {4, 1, 4, 4, kInDepth};
TfLiteIntArray* input_dims = IntArrayFromInts(input_shape);
TfLiteTensor input_tensor = CreateTensor(kConvInput1x4x4x1, input_dims);
input_tensor.params = {kInputScale, kInputZeroPoint};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Filter quantization parameters.
int filter_zero_points[kOutDepth + 1] = {kOutDepth, 0};
float filter_scales[kOutDepth + 1] = {kOutDepth, 0.00448552053f};
TfLiteAffineQuantization filter_quant;
filter_quant.scale = FloatArrayFromFloats(filter_scales);
filter_quant.zero_point = IntArrayFromInts(filter_zero_points);
filter_quant.quantized_dimension = 0;
// Create filter tensor of size 1x3x3x1.
int filter_shape[] = {4, kOutDepth, 3, 3, kInDepth};
TfLiteIntArray* filter_dims = IntArrayFromInts(filter_shape);
TfLiteTensor filter_tensor = CreateTensor(kConvFilter1x3x3x1, filter_dims);
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Bias quantization parameters: same zero point, but different scale per
// output channel.
int bias_zero_points[kOutDepth + 1] = {kOutDepth, 0};
float bias_scales[kOutDepth + 1] = {kOutDepth, 0.00001758864f};
TfLiteAffineQuantization bias_quant;
bias_quant.scale = FloatArrayFromFloats(bias_scales);
bias_quant.zero_point = IntArrayFromInts(bias_zero_points);
bias_quant.quantized_dimension = 0;
// Create size 1 zero bias tensor.
int bias_shape[] = {1, kOutDepth};
TfLiteIntArray* bias_dims = IntArrayFromInts(bias_shape);
TfLiteTensor bias_tensor = CreateTensor(kConvZeroBias, bias_dims);
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Output quantization parameters: same zero point and scale for all elements.
const float output_scale = 0.00627814838f;
const int output_zero_point = -7;
float output_scales[] = {1, output_scale};
int output_zero_points[] = {1, output_zero_point};
TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales),
IntArrayFromInts(output_zero_points),
0};
// Create output tensor of 1x2x2x1.
int8_t output_data[4 * 2 * 2 * kOutDepth];
int output_shape[] = {4, 1, 2, 2, kOutDepth};
TfLiteIntArray* output_dims = IntArrayFromInts(output_shape);
const int output_dims_count = ElementCount(*output_dims);
TfLiteTensor output_tensor = CreateTensor(output_data, output_dims);
output_tensor.params = {output_scale, output_zero_point};
output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant};
// The 3 inputs include the input, filter and bias tensors.
constexpr int inputs_size = 3;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
input_tensor,
filter_tensor,
bias_tensor,
output_tensor,
};
TfLiteConvParams conv_params{tflite::testing::common_conv_params};
conv_params.padding = kTfLitePaddingSame;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, ValidateConvGoldens(tensors, tensors_size,
kConvGoldenOutput4x4InputPaddingSame2x2,
output_dims_count, &conv_params,
tflite::Register_CONV_2D(), output_data,
1.0 /* tolerance */));
}
#endif // !defined(XTENSA)
// Same as Int8Filter1x3x3x1ShouldMatchGoldenEvenInputPaddingSame, except with
// an odd-size input of 5x5 instead of 4x4.
TF_LITE_MICRO_TEST(Int8Filter1x3x3x1ShouldMatchGoldenOddInputPaddingSame) {
using tflite::ElementCount;
using tflite::kConvFilter1x3x3x1;
using tflite::kConvGoldenOutput5x5InputPaddingSame3x3;
using tflite::kConvInput1x5x5x1;
using tflite::kConvZeroBias;
using tflite::testing::CreateTensor;
using tflite::testing::FloatArrayFromFloats;
using tflite::testing::IntArrayFromInts;
using tflite::testing::ValidateConvGoldens;
constexpr int kInDepth = 1;
constexpr int kOutDepth = 1;
// Input quantization parameters: same scale and zero point for all input
// elements.
constexpr float kInputScale = 0.00392120517f;
constexpr int kInputZeroPoint = -128;
float input_scales[] = {1, kInputScale};
int input_zero_points[] = {1, kInputZeroPoint};
TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales),
IntArrayFromInts(input_zero_points),
0};
// Create input tensor of size 1x5x5x1.
int input_shape[] = {4, 1, 5, 5, kInDepth};
TfLiteIntArray* input_dims = IntArrayFromInts(input_shape);
TfLiteTensor input_tensor = CreateTensor(kConvInput1x5x5x1, input_dims);
input_tensor.params = {kInputScale, kInputZeroPoint};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Filter quantization parameters.
int filter_zero_points[kOutDepth + 1] = {kOutDepth, 0};
float filter_scales[kOutDepth + 1] = {kOutDepth, 0.00448552053f};
TfLiteAffineQuantization filter_quant;
filter_quant.scale = FloatArrayFromFloats(filter_scales);
filter_quant.zero_point = IntArrayFromInts(filter_zero_points);
filter_quant.quantized_dimension = 0;
// Create filter tensor of size 1x3x3x1.
int filter_shape[] = {4, kOutDepth, 3, 3, kInDepth};
TfLiteIntArray* filter_dims = IntArrayFromInts(filter_shape);
TfLiteTensor filter_tensor = CreateTensor(kConvFilter1x3x3x1, filter_dims);
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Bias quantization parameters: same zero point, but different scale per
// output channel.
int bias_zero_points[kOutDepth + 1] = {kOutDepth, 0};
float bias_scales[kOutDepth + 1] = {kOutDepth, 0.00001758864f};
TfLiteAffineQuantization bias_quant;
bias_quant.scale = FloatArrayFromFloats(bias_scales);
bias_quant.zero_point = IntArrayFromInts(bias_zero_points);
bias_quant.quantized_dimension = 0;
// Create size 1 zero bias tensor.
int bias_shape[] = {1, kOutDepth};
TfLiteIntArray* bias_dims = IntArrayFromInts(bias_shape);
TfLiteTensor bias_tensor = CreateTensor(kConvZeroBias, bias_dims);
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Output quantization parameters: same zero point and scale for all elements.
const float output_scale = 0.00627814838f;
const int output_zero_point = -7;
float output_scales[] = {1, output_scale};
int output_zero_points[] = {1, output_zero_point};
TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales),
IntArrayFromInts(output_zero_points),
0};
// Create output tensor.
int8_t output_data[4 * 3 * 3 * kOutDepth];
int output_shape[] = {4, 1, 3, 3, kOutDepth};
TfLiteIntArray* output_dims = IntArrayFromInts(output_shape);
const int output_dims_count = ElementCount(*output_dims);
TfLiteTensor output_tensor = CreateTensor(output_data, output_dims);
output_tensor.params = {output_scale, output_zero_point};
output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant};
// The 3 inputs include the input, filter and bias tensors.
constexpr int inputs_size = 3;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
input_tensor,
filter_tensor,
bias_tensor,
output_tensor,
};
TfLiteConvParams conv_params{tflite::testing::common_conv_params};
conv_params.padding = kTfLitePaddingSame;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, ValidateConvGoldens(tensors, tensors_size,
kConvGoldenOutput5x5InputPaddingSame3x3,
output_dims_count, &conv_params,
tflite::Register_CONV_2D(), output_data,
1.0 /* tolerance */));
}
TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
const int output_dims_count = 12;
int8_t output_data[output_dims_count];
......
@@ -476,4 +476,29 @@ const int8_t kConvGoldenOutput1x16x16x8[1 * 16 * 16 * 8] = {
12, -118, -6, 33, -128, -43, -95, 49, 7, -117, -4, 32,
-128, -37, -95, 53, -7, -117, -1, 30};
// Conv Test Case: Int8Filter1x3x3x1ShouldMatchGolden
const int8_t kConvFilter1x3x3x1[1 * 3 * 3 * 1]{
22, -98, 78, -127, 101, 47, 87, 12, -15,
};
const int32_t kConvZeroBias[1] = {0};
// Kernel Conv Test Case: Int8Filter1x3x3x1ShouldMatchGoldenEvenInput
const int8_t kConvInput1x4x4x1[1 * 4 * 4 * 1]{
-127, -111, -95, -79, -63, -47, -31, -15, 1, 17, 33, 49, 65, 81, 97, 113,
};
// Conv Test Case: Int8Filter1x3x3x1ShouldMatchGoldenOddInput
const int8_t kConvInput1x5x5x1[1 * 5 * 5 * 1]{
-128, -111, -95, -79, -63, -47, -31, -15, 1, 17, 33, 49, 65,
81, 97, 113, 127, 100, 80, 60, 40, 20, 0, -20, -40};
// Conv Test Case: Int8Filter1x3x3x1ShouldMatchGoldenEvenInputPaddingSame
const int8_t kConvGoldenOutput4x4InputPaddingSame2x2[1 * 2 * 2 * 1] = {38, 24,
16, -58};
// Conv Test Case: Int8Filter1x3x3x1ShouldMatchGoldenOddInputPaddingSame
const int8_t kConvGoldenOutput5x5InputPaddingSame3x3[1 * 3 * 3 * 1] = {
-6, 25, 30, 58, 76, 7, 50, -11, -59};
} // namespace tflite
@@ -23,6 +23,15 @@ extern const int8_t kConvInput1x32x32x3[];
extern const int8_t kConvFilter8x3x3x3[];
extern const int32_t kConvBiasQuantized8[];
extern const int8_t kConvGoldenOutput1x16x16x8[];
// Kernel Conv Test Cases: Int8Filter1x3x3x1ShouldMatchGolden
extern const int8_t kConvInput1x4x4x1[];
extern const int8_t kConvInput1x5x5x1[];
extern const int8_t kConvFilter1x3x3x1[];
extern const int32_t kConvZeroBias[];
extern const int8_t kConvGoldenOutput4x4InputPaddingSame2x2[];
extern const int8_t kConvGoldenOutput5x5InputPaddingSame3x3[];
} // namespace tflite
#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_DATA_H_