Unverified · Commit 27917855 · Authored by Shlomi Regev · Committed by GitHub

Check for the case where the Operators vector in a tflite model is missing (#245)

* Check for the case where the Operators vector in a tflite model is missing

Some of our tools remove the vector if it's empty (i.e. the subgraph doesn't contain ops). The check is added until the tools are fixed.

BUG=http://b/192589496

* Move NumSubgraphOperators to flatbuffer_utils.h/cc

* Fix the CI errors.

* Remove `#define FLATBUFFERS_LOCALE_INDEPENDENT 0`

We will handle that via the Makefile instead.

* Add -DFLATBUFFERS_LOCALE_INDEPENDENT=0 to the Makefile.

* remove references to flatbuffer from kernel_utils.cc/h

* Fix the build.

* run buildifier
Co-authored-by: Advait Jain <advaitjain@google.com>
Co-authored-by: Advait Jain <advaitjain@users.noreply.github.com>
Parent 380e5bdd
......@@ -92,6 +92,7 @@ cc_library(
],
copts = micro_copts(),
deps = [
":flatbuffer_utils",
":memory_helpers",
":micro_compatibility",
":micro_error_reporter",
......@@ -113,6 +114,7 @@ cc_library(
srcs = ["flatbuffer_utils.cc"],
hdrs = ["flatbuffer_utils.h"],
deps = [
"//tensorflow/lite/schema:schema_fbs",
"@flatbuffers//:runtime_cc",
],
)
......
......@@ -72,7 +72,6 @@ cc_binary(
],
copts = [
"-Werror",
"-Wdouble-promotion",
"-Wsign-compare",
],
deps = [
......
......@@ -142,7 +142,6 @@ cc_binary(
],
copts = [
"-Werror",
"-Wdouble-promotion",
"-Wsign-compare",
],
deps = [
......
......@@ -47,4 +47,18 @@ float FlexbufferWrapper::ElementAsFloat(size_t i) const {
return static_cast<float>(FlexbufferWrapper::ElementAsDouble(i));
}
// TODO(b/192589496): Ops must always be there. Remove this function when fixed
uint32_t NumSubgraphOperators(const SubGraph* subgraph) {
  // Some model-conversion tools strip the `operators` vector entirely when a
  // subgraph contains no ops, so the accessor may legitimately return null
  // here. Treat a missing vector as an empty one.
  const auto* ops = subgraph->operators();
  return (ops == nullptr) ? 0 : ops->size();
}
// TODO(b/192589496): Ops must always be there. Remove this function when fixed
uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx) {
const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
return NumSubgraphOperators(subgraph);
}
} // namespace tflite
......@@ -17,7 +17,10 @@ limitations under the License.
#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Kernels use flexbuffers::Map to pack their init parameters in a tflite file,
......@@ -46,6 +49,10 @@ class FlexbufferWrapper : public flexbuffers::Vector {
float ElementAsFloat(size_t i) const;
};
// Return the number of operators in a subgraph tflite
uint32_t NumSubgraphOperators(const SubGraph* subgraph);
uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx);
} // namespace tflite
#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
......@@ -105,7 +105,6 @@ cc_library(
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/kernels/internal:types",
"//tensorflow/lite/micro:debug_log",
"@flatbuffers//:runtime_cc",
],
)
......
......@@ -12,9 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <numeric>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
......
......@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"
......
......@@ -20,27 +20,6 @@ limitations under the License.
namespace tflite {
namespace micro {
// Out-of-line wrapper: parses the flexbuffer root of `buffer` and returns it
// as a Map, keeping the inline-heavy flexbuffers code out of call sites.
const flexbuffers::Map FlexbuffersWrapperGetRootAsMap(const uint8_t* buffer,
                                                      size_t size) {
  const auto root = flexbuffers::GetRoot(buffer, size);
  return root.AsMap();
}
// Out-of-line lookup of `key` in `m` as an int32, avoiding flexbuffers
// inlining (and the resulting code bloat) at every call site.
int32_t FlexbuffersWrapperAsInt32(const flexbuffers::Map& m, const char* key) {
  const auto ref = m[key];
  return ref.AsInt32();
}
// Out-of-line lookup of `key` in `m` as a bool, avoiding flexbuffers
// inlining (and the resulting code bloat) at every call site.
bool FlexbuffersWrapperAsBool(const flexbuffers::Map& m, const char* key) {
  const auto ref = m[key];
  return ref.AsBool();
}
// Out-of-line lookup of `key` in `m` as a float, avoiding flexbuffers
// inlining (and the resulting code bloat) at every call site.
float FlexbuffersWrapperAsFloat(const flexbuffers::Map& m, const char* key) {
  const auto ref = m[key];
  return ref.AsFloat();
}
// Out-of-line check whether `key` maps to a null value in `m`, avoiding
// flexbuffers inlining (and the resulting code bloat) at every call site.
bool FlexbuffersWrapperIsNull(const flexbuffers::Map& m, const char* key) {
  const auto ref = m[key];
  return ref.IsNull();
}
bool HaveSameShapes(const TfLiteEvalTensor* input1,
const TfLiteEvalTensor* input2) {
TFLITE_DCHECK(input1 != nullptr);
......
......@@ -18,8 +18,6 @@ limitations under the License.
#include <cstdint>
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
......@@ -28,20 +26,6 @@ limitations under the License.
namespace tflite {
namespace micro {
// The Flexbuffer library is inline heavy, which causes code bloat when
// custom ops are used. Wrapping with a function is a portable way to avoid
// this bloat
const flexbuffers::Map FlexbuffersWrapperGetRootAsMap(const uint8_t* buffer,
size_t size);
int32_t FlexbuffersWrapperAsInt32(const flexbuffers::Map& m, const char* key);
bool FlexbuffersWrapperAsBool(const flexbuffers::Map& m, const char* key);
float FlexbuffersWrapperAsFloat(const flexbuffers::Map& m, const char* key);
bool FlexbuffersWrapperIsNull(const flexbuffers::Map& m, const char* key);
// Returns a mutable tensor for a given input index. is_variable must be checked
// during prepare when the full TfLiteTensor is available.
inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context,
......
......@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
#define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <cstddef>
#include <cstdint>
......
......@@ -211,6 +211,8 @@ TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph,
}
}
uint32_t operators_size = NumSubgraphOperators(subgraph);
for (size_t i = 0; i < subgraph->inputs()->size(); ++i) {
const int tensor_index = subgraph->inputs()->Get(i);
AllocationInfo* current = &info_[tensor_index];
......@@ -221,11 +223,11 @@ TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph,
for (size_t i = 0; i < subgraph->outputs()->size(); ++i) {
const int tensor_index = subgraph->outputs()->Get(i);
AllocationInfo* current = &info_[tensor_index];
current->last_used = subgraph->operators()->size() - 1;
current->last_used = operators_size - 1;
}
// Figure out when the first and last use of each tensor is.
for (int i = (subgraph->operators()->size() - 1); i >= 0; --i) {
for (int i = (operators_size - 1); i >= 0; --i) {
const auto* op = subgraph->operators()->Get(i);
for (size_t n = 0; n < op->inputs()->size(); ++n) {
const int tensor_index = op->inputs()->Get(n);
......@@ -742,10 +744,12 @@ TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations(
const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
TFLITE_DCHECK(subgraph != nullptr);
uint32_t operators_size = NumSubgraphOperators(subgraph);
// Initialize NodeAndRegistrations for the subgraph.
NodeAndRegistration* output = reinterpret_cast<NodeAndRegistration*>(
memory_allocator_->AllocateFromTail(
sizeof(NodeAndRegistration) * subgraph->operators()->size(),
sizeof(NodeAndRegistration) * operators_size,
alignof(NodeAndRegistration)));
if (output == nullptr) {
TF_LITE_REPORT_ERROR(
......
......@@ -15,14 +15,16 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <cstddef>
#include <cstdint>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/micro/compatibility.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"
......
......@@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "tensorflow/lite/micro/micro_graph.h"
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_profiler.h"
......@@ -57,9 +60,8 @@ TfLiteStatus MicroGraph::InitSubgraphs() {
for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
subgraph_idx++) {
current_subgraph_index_ = subgraph_idx;
const SubGraph* subgraph = (*subgraphs_)[subgraph_idx];
for (size_t i = 0; i < subgraph->operators()->size(); ++i) {
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
for (size_t i = 0; i < operators_size; ++i) {
TfLiteNode* node =
&(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
const TfLiteRegistration* registration =
......@@ -92,9 +94,8 @@ TfLiteStatus MicroGraph::PrepareSubgraphs() {
for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
subgraph_idx++) {
current_subgraph_index_ = subgraph_idx;
const SubGraph* subgraph = (*subgraphs_)[subgraph_idx];
for (size_t i = 0; i < subgraph->operators()->size(); ++i) {
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
for (size_t i = 0; i < operators_size; ++i) {
TfLiteNode* node =
&(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
const TfLiteRegistration* registration =
......@@ -123,8 +124,8 @@ TfLiteStatus MicroGraph::FreeSubgraphs() {
for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size();
subgraph_idx++) {
current_subgraph_index_ = subgraph_idx;
const SubGraph* subgraph = (*subgraphs_)[subgraph_idx];
for (size_t i = 0; i < subgraph->operators()->size(); ++i) {
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
for (size_t i = 0; i < operators_size; ++i) {
TfLiteNode* node =
&(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
const TfLiteRegistration* registration =
......@@ -152,9 +153,8 @@ TfLiteStatus MicroGraph::InvokeSubgraph(int subgraph_idx) {
subgraph_idx, subgraphs_->size());
return kTfLiteError;
}
const SubGraph* subgraph = (*subgraphs_)[subgraph_idx];
for (size_t i = 0; i < subgraph->operators()->size(); ++i) {
uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx);
for (size_t i = 0; i < operators_size; ++i) {
TfLiteNode* node =
&(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node);
const TfLiteRegistration* registration = subgraph_allocations_[subgraph_idx]
......
......@@ -60,7 +60,7 @@ class MicroGraph {
// Get the specified input tensor of a specified subgraph in the model.
virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx);
// Number of tensor outputs to a specified subgraph in the model.
// Number of tensor outputs from a specified subgraph in the model.
virtual size_t NumSubgraphOutputs(int subgraph_idx);
// Get the specified output tensor of a specified subgraph in the model.
......
......@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_allocator.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
......@@ -96,7 +97,8 @@ TfLiteStatus MicroInterpreter::PrepareNodeAndRegistrationDataFromFlatbuffer() {
auto* opcodes = model_->operator_codes();
BuiltinDataAllocator* builtin_data_allocator =
allocator_.GetBuiltinDataAllocator();
for (size_t i = 0; i < subgraph->operators()->size(); ++i) {
uint32_t operators_size = NumSubgraphOperators(subgraph);
for (size_t i = 0; i < operators_size; ++i) {
const auto* op = subgraph->operators()->Get(i);
const size_t index = op->opcode_index();
if (index >= opcodes->size()) {
......
......@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <cstddef>
#include <cstdint>
......@@ -110,10 +112,6 @@ class MicroInterpreter {
TfLiteStatus initialization_status() const { return initialization_status_; }
size_t operators_size() const {
return model_->subgraphs()->Get(0)->operators()->size();
}
// Populates node and registration pointers representing the inference graph
// of the model from values inside the flatbuffer (loaded from the TfLiteModel
// instance). Persistent data (e.g. operator data) is allocated from the
......
......@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <cstdio>
#include <cstring>
......
......@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
......
......@@ -16,6 +16,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_
#define TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_allocator.h"
#include "tensorflow/lite/micro/micro_graph.h"
......
......@@ -16,7 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_
#define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_
// Useful functions for writing tests.
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#include <cstdint>
#include <limits>
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.