Unverified commit f537895f, authored by TFLM-bot, committed via GitHub

Sync from upstream TF. (#90)

Parent commit: 560fd3d1
......@@ -225,10 +225,10 @@ TfLiteStatus EvalHifi(const XtensaSoftmaxOpData* op_data,
}
return kTfLiteOk;
}
} // namespace
#endif // defined(FUSION_F1) || defined(HIFI5)
} // namespace
void* XtensaInitSoftmax(TfLiteContext* context, const char* buffer,
size_t length) {
#if defined(HIFIMINI) || defined(FUSION_F1) || defined(HIFI5)
......
......@@ -242,26 +242,6 @@ TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph,
}
}
}
// Sanity check for valid tensor lifetime.
for (size_t i = 0; i < tensor_count_; ++i) {
AllocationInfo* current = &info_[i];
// Even though tensor appears to be read only it may still need to be
// allocated.
const bool appears_read_only =
(current->first_created == -1) && (current->last_used != -1);
const bool has_partial_lifetime =
!appears_read_only &&
((current->first_created == -1) || (current->last_used == -1));
if (has_partial_lifetime && current->needs_allocating) {
TF_LITE_REPORT_ERROR(
reporter_,
"Logic error in memory planner, tensor %d has an invalid lifetime: "
"first_created: %d, last_used: %d",
i, current->first_created, current->last_used);
return kTfLiteError;
}
}
return kTfLiteOk;
}
......
......@@ -869,4 +869,35 @@ TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) {
0, subgraph_allocations[0].tensors[5].data.uint8 - start);
}
// Verifies that tensors not consumed by any operator (an unused subgraph
// input and a dangling tensor) receive no arena space: their data pointers
// coincide with an already-allocated tensor instead of claiming new bytes.
TF_LITE_MICRO_TEST(TestModelWithUnusedTensors) {
  // NOTE(review): op_resolver is not passed to any call below — presumably
  // kept for parity with sibling tests; confirm whether it can be dropped.
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  // Model with one op, two inputs (tensor 1 unused) and one dangling tensor 3.
  const tflite::Model* model = tflite::testing::GetModelWithUnusedInputs();
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  // Allocation happens in two phases: StartModelAllocation reserves the
  // tensor metadata, FinishModelAllocation runs the memory planner.
  tflite::SubgraphAllocations* subgraph_allocations =
      allocator->StartModelAllocation(model);
  TF_LITE_MICRO_EXPECT(nullptr != subgraph_allocations);
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, subgraph_allocations,
                                                  &scratch_buffer_handles));
  // Offsets below are measured from the output tensor's (index 2) data
  // pointer. The used input (index 0) sits 64 bytes away — the size of one
  // 1x64 int8 tensor — while the unused tensors get a zero offset, i.e. no
  // space of their own.
  // Unused input tensor should not occupy any space.
  uint8_t* start = subgraph_allocations[0].tensors[2].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(
      64, subgraph_allocations[0].tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(
      0, subgraph_allocations[0].tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(
      0, subgraph_allocations[0].tensors[2].data.uint8 - start);
  // Unused tensor should not occupy any space.
  TF_LITE_MICRO_EXPECT_EQ(
      0, subgraph_allocations[0].tensors[3].data.uint8 - start);
}
TF_LITE_MICRO_TESTS_END
......@@ -341,6 +341,72 @@ const Model* BuildModelWithOfflinePlanning(int number_of_tensors,
node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs);
}
// Serializes a minimal flatbuffer model exercising unused tensors: a single
// custom op reads tensor 0 and writes tensor 2, while tensor 1 (an unused
// subgraph input) and tensor 3 (a dangling tensor) are never touched.
const Model* BuildModelWithUnusedInputs() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* fb = BuilderInstance();

  // One shared empty buffer backs every tensor.
  constexpr size_t kNumBuffers = 1;
  const Offset<Buffer> model_buffers[kNumBuffers] = {CreateBuffer(*fb)};

  // Every tensor uses the same 1x64 int8 shape.
  constexpr size_t kShapeRank = 2;
  const int32_t shape[kShapeRank] = {1, 64};

  constexpr size_t kNumTensors = 4;
  const Offset<Tensor> model_tensors[kNumTensors] = {
      CreateTensor(*fb, fb->CreateVector(shape, kShapeRank), TensorType_INT8,
                   0, fb->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*fb, fb->CreateVector(shape, kShapeRank), TensorType_INT8,
                   0, fb->CreateString("test_unused_input_tensor"), 0, false),
      CreateTensor(*fb, fb->CreateVector(shape, kShapeRank), TensorType_INT8,
                   0, fb->CreateString("test_output_tensor"), 0, false),
      CreateTensor(*fb, fb->CreateVector(shape, kShapeRank), TensorType_INT8,
                   0, fb->CreateString("test_unused_tensor"), 0, false),
  };

  // Subgraph I/O: two declared inputs (only tensor 0 is consumed) and one
  // output. The operator itself touches tensors {0} -> {2}.
  constexpr size_t kNumSubgraphInputs = 2;
  const int32_t subgraph_inputs[kNumSubgraphInputs] = {0, 1};
  constexpr size_t kNumSubgraphOutputs = 1;
  const int32_t subgraph_outputs[kNumSubgraphOutputs] = {2};
  constexpr size_t kNumOpInputs = 1;
  const int32_t op_inputs[kNumOpInputs] = {0};
  constexpr size_t kNumOpOutputs = 1;
  const int32_t op_outputs[kNumOpOutputs] = {2};

  constexpr size_t kNumOperators = 1;
  const Offset<Operator> model_operators[kNumOperators] = {
      CreateOperator(*fb, 0, fb->CreateVector(op_inputs, kNumOpInputs),
                     fb->CreateVector(op_outputs, kNumOpOutputs),
                     BuiltinOptions_NONE),
  };

  constexpr size_t kNumSubgraphs = 1;
  const Offset<SubGraph> model_subgraphs[kNumSubgraphs] = {CreateSubGraph(
      *fb, fb->CreateVector(model_tensors, kNumTensors),
      fb->CreateVector(subgraph_inputs, kNumSubgraphInputs),
      fb->CreateVector(subgraph_outputs, kNumSubgraphOutputs),
      fb->CreateVector(model_operators, kNumOperators),
      fb->CreateString("test_subgraph"))};

  constexpr size_t kNumOperatorCodes = 1;
  const Offset<OperatorCode> model_operator_codes[kNumOperatorCodes] = {
      CreateOperatorCodeDirect(*fb, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};

  const Offset<Model> model_offset =
      CreateModel(*fb, 0, fb->CreateVector(model_operator_codes, kNumOperatorCodes),
                  fb->CreateVector(model_subgraphs, kNumSubgraphs),
                  fb->CreateString("test_model"),
                  fb->CreateVector(model_buffers, kNumBuffers));
  FinishModelBuffer(*fb, model_offset);

  // The finished buffer lives inside the shared builder instance; return a
  // typed view of its root table.
  return flatbuffers::GetRoot<Model>(fb->GetBufferPointer());
}
const Model* BuildSimpleMockModel() {
using flatbuffers::Offset;
flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
......@@ -953,6 +1019,13 @@ AllOpsResolver GetOpResolver() {
MultipleInputs::GetMutableRegistration());
return op_resolver;
}
// Returns the shared singleton model with one operator, two inputs (one of
// them unused) and one dangling unused tensor. The flatbuffer is built on
// first call and cached for the remainder of the process.
const Model* GetModelWithUnusedInputs() {
  // Function-local static initialization runs exactly once (and is
  // thread-safe since C++11), so the manual nullptr check and the
  // const-stripping const_cast of the original are unnecessary.
  static const Model* model = BuildModelWithUnusedInputs();
  return model;
}
const Model* GetSimpleMockModel() {
static Model* model = nullptr;
......
......@@ -126,6 +126,10 @@ const Model* GetModelWithOfflinePlanning(int num_tensors,
int num_conns,
int num_subgraph_inputs = 0);
// Returns a flatbuffer with a single operator, two inputs (one unused) and one
// output.
const Model* GetModelWithUnusedInputs();
// Returns a flatbuffer model with `simple_stateful_op`
const Model* GetSimpleStatefulModel();
......
# Pulls in the macro that enumerates packages allowed to depend on TFLM
# kernel-internal targets.
load("//tensorflow:extra_rules.bzl", "tflm_kernel_friends")
package(
    # layering_check is disabled for this package; presumably some targets
    # include headers from transitive deps -- TODO confirm before re-enabling.
    features = ["-layering_check"],
    licenses = ["notice"],
)
# Visibility group for external packages granted access to kernel internals;
# the member list is maintained centrally in extra_rules.bzl.
package_group(
    name = "kernel_friends",
    packages = tflm_kernel_friends(),
)
package_group(
name = "micro",
packages = ["//tensorflow/lite/micro/..."],
......@@ -26,10 +19,9 @@ cc_library(
"micro_test.h",
],
visibility = [
":kernel_friends",
":micro",
":microfrontend",
], # TODO(b/188226023)
],
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.