Unverified commit d25b2678 authored by Shlomi Regev, committed by GitHub

Provide a lightweight version of flexbuffers::Vector (#243)

- Save ~4KB of RAM by implementing a lightweight version of the vector, which only accesses integer values, without implicit conversion.
- Change Micro kernels to access flexbuffers as vectors instead of maps, which is recommended by the flexbuffers doc for efficiency. Since the values in the vector are ordered alphabetically by their keys, the kernels can access them by index instead (see the sketch after this commit header).
- Revert detection_postprocess to the native flexbuffers API. The
LiteVector API doesn't support IsNull() and I prefer not to support two
custom flexbuffer APIs.
Co-authored-by: Nat Jeffries <natmjeffries@gmail.com>
Parent 97856467
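For background, a minimal sketch (not part of the commit) contrasting the two access patterns the message describes. The parameter names "alpha" and "beta" and the function names are invented for illustration; because flexbuffers stores map keys sorted, the map's values viewed as a vector appear in alphabetical key order.

#include <cstddef>
#include <cstdint>

#include "flatbuffers/flexbuffers.h"

// Key-based map lookup: binary-searches the sorted keys and pulls in the
// inline-heavy flexbuffers::Map code.
int32_t ReadBetaByKey(const uint8_t* buffer, size_t length) {
  return flexbuffers::GetRoot(buffer, length).AsMap()["beta"].AsInt32();
}

// Index-based vector access: views the same map as a vector of values.
// With keys {"alpha", "beta"}, "beta" sorts to index 1.
int32_t ReadBetaByIndex(const uint8_t* buffer, size_t length) {
  return flexbuffers::GetRoot(buffer, length).AsVector()[1].AsInt32();
}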
@@ -108,6 +108,15 @@ cc_library(
],
)
cc_library(
name = "flatbuffer_utils",
srcs = ["flatbuffer_utils.cc"],
hdrs = ["flatbuffer_utils.h"],
deps = [
"@flatbuffers//:runtime_cc",
],
)
cc_library(
name = "memory_helpers",
srcs = ["memory_helpers.cc"],
@@ -378,6 +387,21 @@ cc_test(
],
)
cc_test(
name = "flatbuffer_utils_test",
srcs = [
"flatbuffer_utils_test.cc",
],
tags = [
"nomsan", # TODO(b/192311485): See http://b/192311485#comment2
],
deps = [
":flatbuffer_utils",
":test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
cc_test(
name = "memory_helpers_test",
srcs = [
......
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/flatbuffer_utils.h"
namespace tflite {
FlexbufferWrapper::FlexbufferWrapper(const uint8_t* buffer, size_t size)
: flexbuffers::Vector(flexbuffers::GetRoot(buffer, size).AsVector()) {}
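// Note: data_ and byte_width_ below are protected members inherited from
// flexbuffers::Vector; each element is stored in byte_width_ bytes.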
int64_t FlexbufferWrapper::ElementAsInt64(size_t i) const {
const uint8_t* elem = data_ + i * byte_width_;
return ::flexbuffers::ReadInt64(elem, byte_width_);
}
uint64_t FlexbufferWrapper::ElementAsUInt64(size_t i) const {
const uint8_t* elem = data_ + i * byte_width_;
return ::flexbuffers::ReadUInt64(elem, byte_width_);
}
int32_t FlexbufferWrapper::ElementAsInt32(size_t i) const {
return static_cast<int32_t>(ElementAsInt64(i));
}
bool FlexbufferWrapper::ElementAsBool(size_t i) const {
return static_cast<bool>(ElementAsUInt64(i));
}
double FlexbufferWrapper::ElementAsDouble(size_t i) const {
const uint8_t* elem = data_ + i * byte_width_;
return ::flexbuffers::ReadDouble(elem, byte_width_);
}
float FlexbufferWrapper::ElementAsFloat(size_t i) const {
return static_cast<float>(FlexbufferWrapper::ElementAsDouble(i));
}
} // namespace tflite
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "flatbuffers/flexbuffers.h"
namespace tflite {
// Kernels use flexbuffers::Map to pack their init parameters in a tflite file,
// with the parameter names as map keys and the parameter values as the
// corresponding map values.
// Accessing the map values using the flexbuffers::Map class is inline-heavy,
// which can cause the code size to bloat beyond what's reasonable for a micro
// application. Use this class instead, when possible.
// FlexbufferWrapper takes advantage of the following properties of
// flexbuffers::Map:
// 1. It can be viewed as a flexbuffers::Vector of the values.
// 2. The values in the vector are ordered alphabetically by their keys.
// 3. All integer and Boolean values are stored as 64-bit numbers.
// 4. All floating point values are stored as double precision numbers.
// The properties are mentioned in the flexbuffers docs, but we rely on
// a unit test to catch design changes.
class FlexbufferWrapper : public flexbuffers::Vector {
public:
// Construct with a serialized flexbuffer 'buffer' of 'size' bytes
explicit FlexbufferWrapper(const uint8_t* buffer, size_t size);
int64_t ElementAsInt64(size_t i) const;
uint64_t ElementAsUInt64(size_t i) const;
int32_t ElementAsInt32(size_t i) const;
bool ElementAsBool(size_t i) const;
double ElementAsDouble(size_t i) const;
float ElementAsFloat(size_t i) const;
};
} // namespace tflite
#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
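As a hypothetical usage sketch (the parameters 'alpha' and 'gamma' and the helper below are invented for illustration), a kernel whose init params were packed with those keys could read them by index, in alphabetical key order:

#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/micro/flatbuffer_utils.h"

// Indices into the init flexbuffer's vector, ordered alphabetically by
// parameter name.
constexpr int kAlphaIndex = 0;  // 'alpha'
constexpr int kGammaIndex = 1;  // 'gamma'

void ParseExampleParams(const uint8_t* buffer, size_t length, int32_t* alpha,
                        float* gamma) {
  tflite::FlexbufferWrapper wrapper(buffer, length);
  *alpha = wrapper.ElementAsInt32(kAlphaIndex);
  *gamma = wrapper.ElementAsFloat(kGammaIndex);
}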
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestFlexbufferWrapper) {
struct TestParam {
std::string name;
std::string type;
std::string value;
};
TestParam params[] = {
{"xyz", "Int", "613"},
{"Neuron", "Double", "13.22"},
{"angle", "Int", "300"},
{"llama", "Bool", "false"},
{"Curl", "Float", "0.232"},
{"aardvark", "Bool", "true"},
{"ghost", "Double", "0.0000000001"},
{"123stigma", "Bool", "true"},
};
// Indices of the elements of params, sorted alphabetically by name
int params_sorted[] = {7, 4, 1, 5, 2, 6, 3, 0};
const int param_num = sizeof(params) / sizeof(params[0]);
flexbuffers::Builder fbb;
fbb.Map([&]() {
for (int i = 0; i < param_num; i++) {
const std::string& param_value = params[i].value;
if (params[i].type == "Int") {
fbb.Int(params[i].name.c_str(), std::stoi(param_value));
} else if (params[i].type == "Bool") {
fbb.Bool(params[i].name.c_str(), param_value == "true");
} else if (params[i].type == "Double") {
fbb.Double(params[i].name.c_str(), std::stod(param_value));
} else if (params[i].type == "Float") {
fbb.Float(params[i].name.c_str(), std::stof(param_value));
}
}
});
fbb.Finish();
const std::vector<uint8_t> buffer = fbb.GetBuffer();
tflite::FlexbufferWrapper wrapper(buffer.data(), buffer.size());
for (int i = 0; i < param_num; i++) {
std::string& param_value = params[params_sorted[i]].value;
if (params[params_sorted[i]].type == "Int") {
TF_LITE_MICRO_EXPECT(wrapper.ElementAsInt32(i) == std::stoi(param_value));
} else if (params[params_sorted[i]].type == "Bool") {
TF_LITE_MICRO_EXPECT(wrapper.ElementAsBool(i) == (param_value == "true"));
} else if (params[params_sorted[i]].type == "Double") {
TF_LITE_MICRO_EXPECT(wrapper.ElementAsDouble(i) ==
std::stod(param_value));
} else if (params[params_sorted[i]].type == "Float") {
TF_LITE_MICRO_EXPECT(wrapper.ElementAsFloat(i) == std::stof(param_value));
}
}
}
TF_LITE_MICRO_TESTS_END
@@ -216,6 +216,7 @@ cc_library(
"//tensorflow/lite/kernels/internal:tensor",
"//tensorflow/lite/kernels/internal:types",
"//tensorflow/lite/micro:memory_helpers",
"//tensorflow/lite/micro:flatbuffer_utils",
"//tensorflow/lite/micro:micro_graph",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/schema:schema_fbs",
......
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
/*
@@ -54,6 +55,11 @@ namespace {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
// Indices into the init flexbuffer's vector.
// The parameter's name is in the comment that follows.
// Elements in the vector are ordered alphabetically by parameter name.
constexpr int kCyclesMaxIndex = 0; // 'cycles_max'
// TODO(b/149795762): Add this to TfLiteStatus enum.
constexpr TfLiteStatus kTfLiteAbort = static_cast<TfLiteStatus>(-9);
@@ -74,10 +80,8 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   if (buffer != nullptr && length > 0) {
     const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
-    const flexbuffers::Map& m =
-        tflite::micro::FlexbuffersWrapperGetRootAsMap(buffer_t, length);
-    op_data->cycles_max =
-        tflite::micro::FlexbuffersWrapperAsInt32(m, "cycles_max");
+    tflite::FlexbufferWrapper wrapper(buffer_t, length);
+    op_data->cycles_max = wrapper.ElementAsInt32(kCyclesMaxIndex);
   } else {
     op_data->cycles_max = 0;
   }
......
@@ -119,38 +119,29 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   OpData* op_data = nullptr;
   const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
+  const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
   op_data = reinterpret_cast<OpData*>(
       context->AllocatePersistentBuffer(context, sizeof(OpData)));
-  const flexbuffers::Map& m =
-      micro::FlexbuffersWrapperGetRootAsMap(buffer_t, length);
-  op_data->max_detections =
-      micro::FlexbuffersWrapperAsInt32(m, "max_detections");
-  op_data->max_classes_per_detection =
-      micro::FlexbuffersWrapperAsInt32(m, "max_classes_per_detection");
-  if (micro::FlexbuffersWrapperIsNull(m, "detections_per_class"))
+  op_data->max_detections = m["max_detections"].AsInt32();
+  op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32();
+  if (m["detections_per_class"].IsNull())
     op_data->detections_per_class = kNumDetectionsPerClass;
   else
-    op_data->detections_per_class =
-        micro::FlexbuffersWrapperAsInt32(m, "detections_per_class");
-  if (micro::FlexbuffersWrapperIsNull(m, "use_regular_nms"))
+    op_data->detections_per_class = m["detections_per_class"].AsInt32();
+  if (m["use_regular_nms"].IsNull())
     op_data->use_regular_non_max_suppression = false;
   else
-    op_data->use_regular_non_max_suppression =
-        micro::FlexbuffersWrapperAsBool(m, "use_regular_nms");
+    op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool();
   op_data->non_max_suppression_score_threshold =
-      micro::FlexbuffersWrapperAsFloat(m, "nms_score_threshold");
-  op_data->intersection_over_union_threshold =
-      micro::FlexbuffersWrapperAsFloat(m, "nms_iou_threshold");
-  op_data->num_classes = micro::FlexbuffersWrapperAsInt32(m, "num_classes");
-  op_data->scale_values.y = micro::FlexbuffersWrapperAsFloat(m, "y_scale");
-  op_data->scale_values.x = micro::FlexbuffersWrapperAsFloat(m, "x_scale");
-  op_data->scale_values.h = micro::FlexbuffersWrapperAsFloat(m, "h_scale");
-  op_data->scale_values.w = micro::FlexbuffersWrapperAsFloat(m, "w_scale");
+      m["nms_score_threshold"].AsFloat();
+  op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat();
+  op_data->num_classes = m["num_classes"].AsInt32();
+  op_data->scale_values.y = m["y_scale"].AsFloat();
+  op_data->scale_values.x = m["x_scale"].AsFloat();
+  op_data->scale_values.h = m["h_scale"].AsFloat();
+  op_data->scale_values.w = m["w_scale"].AsFloat();
   return op_data;
 }
......
@@ -266,6 +266,7 @@ MICROLITE_BENCHMARK_SRCS := \
$(wildcard tensorflow/lite/micro/benchmarks/*benchmark.cc)
MICROLITE_TEST_SRCS := \
tensorflow/lite/micro/flatbuffer_utils_test.cc \
tensorflow/lite/micro/memory_arena_threshold_test.cc \
tensorflow/lite/micro/memory_helpers_test.cc \
tensorflow/lite/micro/micro_allocator_test.cc \
......
@@ -66,6 +66,11 @@ EXCLUDED_TESTS := \
tensorflow/lite/micro/micro_allocator_test.cc \
tensorflow/lite/micro/memory_helpers_test.cc \
tensorflow/lite/micro/memory_arena_threshold_test.cc
# flatbuffer_utils_test is intentionally disabled because the flexbuffer builder
# uses dynamic memory.
EXCLUDED_TESTS += tensorflow/lite/micro/flatbuffer_utils_test.cc
MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
EXCLUDED_EXAMPLE_TESTS := \
......
@@ -86,6 +86,11 @@ EXCLUDED_TESTS := \
tensorflow/lite/micro/memory_helpers_test.cc \
tensorflow/lite/micro/memory_arena_threshold_test.cc \
tensorflow/lite/micro/recording_micro_allocator_test.cc
# flatbuffer_utils_test is intentionally disabled because the flexbuffer builder
# uses dynamic memory.
EXCLUDED_TESTS += tensorflow/lite/micro/flatbuffer_utils_test.cc
MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
EXCLUDED_EXAMPLE_TESTS := \
......