diff --git a/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc b/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
index c97647d94cc1e339ca10d51488fbad64e46e1eef..3b7337b191e0a705091a0c3c07f043521eef7fe2 100644
--- a/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
+++ b/tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc
@@ -225,10 +225,10 @@ TfLiteStatus EvalHifi(const XtensaSoftmaxOpData* op_data,
   }
   return kTfLiteOk;
 }
-}  // namespace
-
 #endif  // defined(FUSION_F1) || defined(HIFI5)
 
+}  // namespace
+
 void* XtensaInitSoftmax(TfLiteContext* context, const char* buffer,
                         size_t length) {
 #if defined(HIFIMINI) || defined(FUSION_F1) || defined(HIFI5)
diff --git a/tensorflow/lite/micro/micro_allocator.cc b/tensorflow/lite/micro/micro_allocator.cc
index 356131525b0b85719235da7e2f5c7b49aaf1faa3..02512ce229b5098e279fd3151d068b034f322c16 100644
--- a/tensorflow/lite/micro/micro_allocator.cc
+++ b/tensorflow/lite/micro/micro_allocator.cc
@@ -242,26 +242,6 @@ TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph,
       }
     }
   }
-
-  // Sanity check for valid tensor lifetime.
-  for (size_t i = 0; i < tensor_count_; ++i) {
-    AllocationInfo* current = &info_[i];
-    // Even though tensor appears to be read only it may still need to be
-    // allocated.
-    const bool appears_read_only =
-        (current->first_created == -1) && (current->last_used != -1);
-    const bool has_partial_lifetime =
-        !appears_read_only &&
-        ((current->first_created == -1) || (current->last_used == -1));
-    if (has_partial_lifetime && current->needs_allocating) {
-      TF_LITE_REPORT_ERROR(
-          reporter_,
-          "Logic error in memory planner, tensor %d has an invalid lifetime: "
-          "first_created: %d, last_used: %d",
-          i, current->first_created, current->last_used);
-      return kTfLiteError;
-    }
-  }
   return kTfLiteOk;
 }
 
diff --git a/tensorflow/lite/micro/micro_allocator_test.cc b/tensorflow/lite/micro/micro_allocator_test.cc
index 1c856ef0bfe86c4978b6310ff2bb9932c03665d8..83ef75b4003bb704925f95fcdde90fe88421cd4e 100644
--- a/tensorflow/lite/micro/micro_allocator_test.cc
+++ b/tensorflow/lite/micro/micro_allocator_test.cc
@@ -869,4 +869,35 @@ TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) {
       0, subgraph_allocations[0].tensors[5].data.uint8 - start);
 }
 
+TF_LITE_MICRO_TEST(TestModelWithUnusedTensors) {
+  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
+
+  const tflite::Model* model = tflite::testing::GetModelWithUnusedInputs();
+
+  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
+  constexpr size_t arena_size = 4096;
+  uint8_t arena[arena_size];
+  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
+      arena, arena_size, tflite::GetMicroErrorReporter());
+
+  tflite::SubgraphAllocations* subgraph_allocations =
+      allocator->StartModelAllocation(model);
+  TF_LITE_MICRO_EXPECT(nullptr != subgraph_allocations);
+  TF_LITE_MICRO_EXPECT_EQ(
+      kTfLiteOk, allocator->FinishModelAllocation(model, subgraph_allocations,
+                                                  &scratch_buffer_handles));
+
+  // Unused input tensor should not occupy any space.
+  uint8_t* start = subgraph_allocations[0].tensors[2].data.uint8;
+  TF_LITE_MICRO_EXPECT_EQ(
+      64, subgraph_allocations[0].tensors[0].data.uint8 - start);
+  TF_LITE_MICRO_EXPECT_EQ(
+      0, subgraph_allocations[0].tensors[1].data.uint8 - start);
+  TF_LITE_MICRO_EXPECT_EQ(
+      0, subgraph_allocations[0].tensors[2].data.uint8 - start);
+  // Unused tensor should not occupy any space.
+  TF_LITE_MICRO_EXPECT_EQ(
+      0, subgraph_allocations[0].tensors[3].data.uint8 - start);
+}
+
 TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc
index 2f3962d1f35d544bf79de2a20b1881f36d366fd3..707840c7c64c151e19d3c8a00147ac0b8225dff2 100644
--- a/tensorflow/lite/micro/test_helpers.cc
+++ b/tensorflow/lite/micro/test_helpers.cc
@@ -341,6 +341,72 @@ const Model* BuildModelWithOfflinePlanning(int number_of_tensors,
       node_conn[0].input, node_conn[num_conns - 1].output,
       num_subgraph_inputs);
 }
+const Model* BuildModelWithUnusedInputs() {
+  using flatbuffers::Offset;
+  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
+
+  constexpr size_t buffers_size = 1;
+  const Offset<Buffer> buffers[buffers_size] = {CreateBuffer(*builder)};
+  constexpr size_t tensor_shape_size = 2;
+  const int32_t tensor_shape[tensor_shape_size] = {1, 64};
+  constexpr size_t tensors_size = 4;
+  const Offset<Tensor> tensors[tensors_size] = {
+      CreateTensor(*builder,
+                   builder->CreateVector(tensor_shape, tensor_shape_size),
+                   TensorType_INT8, 0,
+                   builder->CreateString("test_input_tensor"), 0, false),
+      CreateTensor(*builder,
+                   builder->CreateVector(tensor_shape, tensor_shape_size),
+                   TensorType_INT8, 0,
+                   builder->CreateString("test_unused_input_tensor"), 0, false),
+      CreateTensor(*builder,
+                   builder->CreateVector(tensor_shape, tensor_shape_size),
+                   TensorType_INT8, 0,
+                   builder->CreateString("test_output_tensor"), 0, false),
+      CreateTensor(*builder,
+                   builder->CreateVector(tensor_shape, tensor_shape_size),
+                   TensorType_INT8, 0,
+                   builder->CreateString("test_unused_tensor"), 0, false),
+  };
+  constexpr size_t inputs_size = 2;
+  const int32_t inputs[inputs_size] = {0, 1};
+  constexpr size_t outputs_size = 1;
+  const int32_t outputs[outputs_size] = {2};
+  constexpr size_t operator_inputs_size = 1;
+  const int32_t operator_inputs[operator_inputs_size] = {0};
+  constexpr size_t operator_outputs_size = 1;
+  const int32_t operator_outputs[operator_outputs_size] = {2};
+  constexpr size_t operators_size = 1;
+  const Offset<Operator> operators[operators_size] = {
+      CreateOperator(
+          *builder, 0,
+          builder->CreateVector(operator_inputs, operator_inputs_size),
+          builder->CreateVector(operator_outputs, operator_outputs_size),
+          BuiltinOptions_NONE),
+  };
+  constexpr size_t subgraphs_size = 1;
+  const Offset<SubGraph> subgraphs[subgraphs_size] = {
+      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
+                     builder->CreateVector(inputs, inputs_size),
+                     builder->CreateVector(outputs, outputs_size),
+                     builder->CreateVector(operators, operators_size),
+                     builder->CreateString("test_subgraph"))};
+  constexpr size_t operator_codes_size = 1;
+  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
+      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
+                               "mock_custom",
+                               /*version=*/0, BuiltinOperator_CUSTOM)};
+  const Offset<Model> model_offset = CreateModel(
+      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
+      builder->CreateVector(subgraphs, subgraphs_size),
+      builder->CreateString("test_model"),
+      builder->CreateVector(buffers, buffers_size));
+  FinishModelBuffer(*builder, model_offset);
+  void* model_pointer = builder->GetBufferPointer();
+  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
+  return model;
+}
+
 const Model* BuildSimpleMockModel() {
   using flatbuffers::Offset;
   flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
@@ -953,6 +1019,13 @@ AllOpsResolver GetOpResolver() {
                           MultipleInputs::GetMutableRegistration());
   return op_resolver;
 }
+const Model* GetModelWithUnusedInputs() {
+  static Model* model = nullptr;
+  if (!model) {
+    model = const_cast<Model*>(BuildModelWithUnusedInputs());
+  }
+  return model;
+}
 
 const Model* GetSimpleMockModel() {
   static Model* model = nullptr;
diff --git a/tensorflow/lite/micro/test_helpers.h b/tensorflow/lite/micro/test_helpers.h
index ea501ee47582b309cca84484022de3da698198df..994d7466c00a4639507dcb1e8803247652f6b422 100644
--- a/tensorflow/lite/micro/test_helpers.h
+++ b/tensorflow/lite/micro/test_helpers.h
@@ -126,6 +126,10 @@ const Model* GetModelWithOfflinePlanning(int num_tensors,
                                          int num_conns,
                                          int num_subgraph_inputs = 0);
 
+// Returns a flatbuffer with a single operator, two inputs (one unused) and one
+// output.
+const Model* GetModelWithUnusedInputs();
+
 // Returns a flatbuffer model with `simple_stateful_op`
 const Model* GetSimpleStatefulModel();
 
diff --git a/tensorflow/lite/micro/testing/BUILD b/tensorflow/lite/micro/testing/BUILD
index 3dea5752f1e52541b4e3fc787d4dfa8ccb8ab5b3..6d8c74dd7e84d3aa72309180f7e98e068b7d0995 100644
--- a/tensorflow/lite/micro/testing/BUILD
+++ b/tensorflow/lite/micro/testing/BUILD
@@ -1,15 +1,8 @@
-load("//tensorflow:extra_rules.bzl", "tflm_kernel_friends")
-
 package(
     features = ["-layering_check"],
     licenses = ["notice"],
 )
 
-package_group(
-    name = "kernel_friends",
-    packages = tflm_kernel_friends(),
-)
-
 package_group(
     name = "micro",
     packages = ["//tensorflow/lite/micro/..."],
@@ -26,10 +19,9 @@ cc_library(
         "micro_test.h",
     ],
     visibility = [
-        ":kernel_friends",
         ":micro",
        ":microfrontend",
-    ],  # TODO(b/188226023)
+    ],
     deps = [
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/core/api",