未验证 提交 6fdade1a 编写于 作者: V Vamsi Krishna Manchala 提交者: GitHub

Update TFLM examples, kernels and integration tests to clean up the usage of ErrorReporter

This PR updates the remainder of the TFLM examples, all the kernels and integration tests to remove the usage of ErrorReporter as per the changes made to the TFLM Framework API in https://github.com/tensorflow/tflite-micro/pull/1415; replaces TF_LITE_REPORT_ERROR with MicroPrintf.

BUG=http://b/192091017, http://b/245802069
上级 e3989c49
......@@ -76,8 +76,8 @@ cc_test(
":person_detect_model_data",
":simple_images_test_data",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro/testing:micro_test",
"//tensorflow/lite/schema:schema_fbs",
......@@ -95,7 +95,6 @@ cc_library(
deps = [
":model_settings",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_error_reporter",
],
)
......@@ -108,7 +107,6 @@ cc_test(
":image_provider",
":model_settings",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro/testing:micro_test",
],
)
......@@ -123,7 +121,7 @@ cc_library(
],
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_log",
],
)
......@@ -150,8 +148,8 @@ cc_binary(
":image_provider",
":model_settings",
":person_detect_model_data",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:system_setup",
"//tensorflow/lite/schema:schema_fbs",
......
......@@ -15,11 +15,12 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
#include "tensorflow/lite/micro/micro_log.h"
// This dummy implementation writes person and no person scores to the error
// console. Real applications will want to take some custom action instead, and
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score) {
TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
person_score, no_person_score);
void RespondToDetection(int8_t person_score, int8_t no_person_score) {
MicroPrintf("person score:%d no person score %d", person_score,
no_person_score);
}
......@@ -20,7 +20,6 @@ limitations under the License.
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_DETECTION_RESPONDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// Called every time the results of a person detection run are available. The
// `person_score` has the numerical confidence that the captured image contains
......@@ -28,7 +27,6 @@ limitations under the License.
// does not contain a person. Typically if person_score > no person score, the
// image is considered to contain a person. This threshold may be adjusted for
// particular applications.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score);
void RespondToDetection(int8_t person_score, int8_t no_person_score);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_DETECTION_RESPONDER_H_
......@@ -20,13 +20,11 @@ limitations under the License.
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestCallability) {
tflite::MicroErrorReporter micro_error_reporter;
// This will have external side-effects (like printing to the debug console
// or lighting an LED) that are hard to observe, so the most we can do is
// make sure the call doesn't crash.
RespondToDetection(&micro_error_reporter, -100, 100);
RespondToDetection(&micro_error_reporter, 100, 50);
RespondToDetection(-100, 100);
RespondToDetection(100, 50);
}
TF_LITE_MICRO_TESTS_END
......@@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data) {
TfLiteStatus GetImage(int image_width, int image_height, int channels,
int8_t* image_data) {
for (int i = 0; i < image_width * image_height * channels; ++i) {
image_data[i] = 0;
}
......
......@@ -17,7 +17,6 @@ limitations under the License.
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
......@@ -33,7 +32,7 @@ limitations under the License.
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
int image_height, int channels, int8_t* image_data);
TfLiteStatus GetImage(int image_width, int image_height, int channels,
int8_t* image_data);
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_
......@@ -19,17 +19,14 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestImageProvider) {
tflite::MicroErrorReporter micro_error_reporter;
int8_t image_data[kMaxImageSize];
TfLiteStatus get_status = GetImage(&micro_error_reporter, kNumCols, kNumRows,
kNumChannels, image_data);
TfLiteStatus get_status =
GetImage(kNumCols, kNumRows, kNumChannels, image_data);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
// Make sure we can read all of the returned memory locations.
......
......@@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/models/person_detect_model_data.h"
#include "tensorflow/lite/micro/system_setup.h"
......@@ -27,7 +27,6 @@ limitations under the License.
// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
......@@ -48,20 +47,14 @@ static uint8_t tensor_arena[kTensorArenaSize];
void setup() {
tflite::InitializeTarget();
// Set up logging. Google style is to avoid globals or statics because of
// lifetime uncertainty, but since this has a trivial destructor it's okay.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = &micro_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
MicroPrintf(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
......@@ -84,13 +77,13 @@ void setup() {
// Build an interpreter to run the model with.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroInterpreter static_interpreter(
model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
model, micro_op_resolver, tensor_arena, kTensorArenaSize);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
MicroPrintf("AllocateTensors() failed");
return;
}
......@@ -101,14 +94,14 @@ void setup() {
// The name of this function is important for Arduino compatibility.
void loop() {
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.int8)) {
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
if (kTfLiteOk !=
GetImage(kNumCols, kNumRows, kNumChannels, input->data.int8)) {
MicroPrintf("Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
MicroPrintf("Invoke failed.");
}
TfLiteTensor* output = interpreter->output(0);
......@@ -116,5 +109,5 @@ void loop() {
// Process the inference results.
int8_t person_score = output->data.uint8[kPersonIndex];
int8_t no_person_score = output->data.uint8[kNotAPersonIndex];
RespondToDetection(error_reporter, person_score, no_person_score);
RespondToDetection(person_score, no_person_score);
}
......@@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection/testdata/no_person_image_data.h"
#include "tensorflow/lite/micro/examples/person_detection/testdata/person_image_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/models/person_detect_model_data.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
......@@ -35,17 +35,14 @@ uint8_t tensor_arena[tensor_arena_size];
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestInvoke) {
// Set up logging.
tflite::MicroErrorReporter micro_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
TF_LITE_REPORT_ERROR(&micro_error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
MicroPrintf(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// Pull in only the operation implementations we need.
......@@ -63,8 +60,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Build an interpreter to run the model with.
tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
tensor_arena_size,
&micro_error_reporter);
tensor_arena_size);
interpreter.AllocateTensors();
// Get information about the memory area to use for the model's input.
......@@ -86,7 +82,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
MicroPrintf("Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
......@@ -101,9 +97,8 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "Person" score is higher than the other class.
int8_t person_score = output->data.int8[kPersonIndex];
int8_t no_person_score = output->data.int8[kNotAPersonIndex];
TF_LITE_REPORT_ERROR(&micro_error_reporter,
"person data. person score: %d, no person score: %d\n",
person_score, no_person_score);
MicroPrintf("person data. person score: %d, no person score: %d\n",
person_score, no_person_score);
TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);
memcpy(input->data.int8, g_no_person_image_data, input->bytes);
......@@ -111,7 +106,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this "No Person" input.
invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
MicroPrintf("Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
......@@ -126,13 +121,11 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "No Person" score is higher.
person_score = output->data.int8[kPersonIndex];
no_person_score = output->data.int8[kNotAPersonIndex];
TF_LITE_REPORT_ERROR(
&micro_error_reporter,
"no person data. person score: %d, no person score: %d\n", person_score,
no_person_score);
MicroPrintf("no person data. person score: %d, no person score: %d\n",
person_score, no_person_score);
TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score);
TF_LITE_REPORT_ERROR(&micro_error_reporter, "Ran successfully\n");
MicroPrintf("Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END
......@@ -982,8 +982,8 @@ cc_binary(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -85,7 +85,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/add/add9_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/add/add9_input1_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/add/add9_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -107,8 +107,7 @@ void RunModel(const uint8_t* model, const int16_t* input0,
MicroProfiler profiler;
MicroInterpreter interpreter(GetModel(model), AllOpsResolver(), tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input0_tensor = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input0_tensor->bytes, input0_size * sizeof(int16_t));
......
......@@ -954,8 +954,8 @@ cc_test(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -83,7 +83,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/conv/conv9_golden_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/conv/conv9_input_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/conv/conv9_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -104,8 +104,7 @@ void RunModel(const uint8_t* model, const int16_t* input,
MicroProfiler profiler;
AllOpsResolver resolver;
MicroInterpreter interpreter(GetModel(model), resolver, tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input_tensor = interpreter.input(0);
......
......@@ -996,8 +996,8 @@ cc_binary(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -86,7 +86,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/leaky_relu/leaky_relu9_golden_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/leaky_relu/leaky_relu9_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/leaky_relu/leaky_relu9_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -107,8 +107,7 @@ void RunModel(const uint8_t* model, const int16_t* input0,
MicroProfiler profiler;
MicroInterpreter interpreter(GetModel(model), AllOpsResolver(), tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input0_tensor = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input0_tensor->bytes, input0_size * sizeof(int16_t));
......
......@@ -828,8 +828,8 @@ cc_test(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -74,7 +74,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/pad/pad9_golden_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/pad/pad9_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/pad/pad9_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -96,8 +96,7 @@ void RunModel(const uint8_t* model, const int16_t* input0,
AllOpsResolver op_resolver;
MicroInterpreter interpreter(GetModel(model), op_resolver, tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input_tensor0 = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input_tensor0->bytes, input0_size * sizeof(int16_t));
......
......@@ -114,8 +114,8 @@ cc_test(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/quantize/quantize1_golden_int32_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/quantize/quantize1_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/quantize/quantize1_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -46,8 +46,7 @@ void RunModel(const uint8_t* model, const inputT* input0,
AllOpsResolver op_resolver;
MicroInterpreter interpreter(GetModel(model), op_resolver, tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input_tensor0 = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input_tensor0->bytes, input0_size * sizeof(inputT));
......
......@@ -1458,8 +1458,8 @@ cc_binary(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -119,7 +119,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/strided_slice/strided_slice9_golden_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/strided_slice/strided_slice9_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/strided_slice/strided_slice9_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -140,8 +140,7 @@ void RunModel(const uint8_t* model, const int16_t* input0,
MicroProfiler profiler;
MicroInterpreter interpreter(GetModel(model), AllOpsResolver(), tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input0_tensor = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input0_tensor->bytes, input0_size * sizeof(int16_t));
......
......@@ -310,8 +310,8 @@ cc_binary(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -37,7 +37,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/sub/sub4_input0_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/sub/sub4_input1_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/sub/sub4_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -59,8 +59,7 @@ void RunModel(const uint8_t* model, const int16_t* input0,
MicroProfiler profiler;
MicroInterpreter interpreter(GetModel(model), AllOpsResolver(), tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input0_tensor = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input0_tensor->bytes, input0_size * sizeof(int16_t));
......
......@@ -310,8 +310,8 @@ cc_binary(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -37,7 +37,7 @@ limitations under the License.
#include "tensorflow/lite/micro/integration_tests/seanet/transpose_conv/transpose_conv4_input0_int32_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/transpose_conv/transpose_conv4_input1_int16_test_data.h"
#include "tensorflow/lite/micro/integration_tests/seanet/transpose_conv/transpose_conv4_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -60,8 +60,7 @@ void RunModel(const uint8_t* model, const int32_t* input0,
AllOpsResolver op_resolver;
MicroInterpreter interpreter(GetModel(model), op_resolver, tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
nullptr, &profiler);
kTensorArenaSize, nullptr, &profiler);
interpreter.AllocateTensors();
TfLiteTensor* input0_tensor = interpreter.input(0);
TF_LITE_MICRO_EXPECT_EQ(input0_tensor->bytes, input0_size * sizeof(int32_t));
......
......@@ -77,8 +77,8 @@ cc_test(
copts = micro_copts(),
deps = [
":models_and_testdata",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_log",
"//tensorflow/lite/micro:micro_resource_variable",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:recording_allocators",
......
......@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_profiler.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
......@@ -52,7 +52,7 @@ void RunModel(const uint8_t* model,
AllOpsResolver op_resolver;
MicroInterpreter interpreter(GetModel(model), op_resolver, tensor_arena,
kTensorArenaSize, GetMicroErrorReporter(),
kTensorArenaSize,
nullptr, &profiler);
interpreter.AllocateTensors();
% for input_idx, input in enumerate(inputs):
......
......@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -26,7 +26,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/add.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -30,6 +30,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h"
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -31,6 +31,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h"
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h"
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -17,7 +17,7 @@ limitations under the License.
#include <math.h>
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -16,7 +16,7 @@ limitations under the License.
#include <math.h>
#include "mli_interface.h" // NOLINT
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#define KRNL_C_DIM_NHWC 0 // output channels
......
......@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buf_mgr.h"
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/micro_utils.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -23,8 +23,8 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_graph.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_resource_variable.h"
#include "tensorflow/lite/schema/schema_generated.h"
......
......@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -27,6 +27,8 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/conv.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
#endif
......
......@@ -28,6 +28,8 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/depthwise_conv.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
#endif
......
......@@ -25,6 +25,8 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
// #define MCPS_MEASUREMENT
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
......
......@@ -27,6 +27,8 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/logistic.h"
#include "tensorflow/lite/micro/micro_log.h"
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
#endif
......
......@@ -26,6 +26,8 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/softmax.h"
#include "tensorflow/lite/micro/micro_log.h"
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
#endif
......
......@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
/*
* The circular buffer custom operator is used to implement strided streaming
......
......@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -27,6 +27,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -27,6 +27,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/micro/kernels/conv.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/mul.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/pooling.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -26,7 +26,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/activation_utils.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/dequantize.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_context.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
// OLD-TODO(b/117523611): We should factor out a binary_op and put binary ops
......
......@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/hard_swish.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -114,12 +114,10 @@ TF_LITE_MICRO_TEST(IfShouldInvokeSubgraphConditionTrue) {
const tflite::Model* model =
tflite::testing::GetSimpleModelWithSubgraphsAndIf();
tflite::MicroMutableOpResolver<3> resolver;
tflite::MicroErrorReporter reporter;
resolver.AddIf();
resolver.AddAdd();
resolver.AddMul();
tflite::MicroInterpreter interpreter(model, resolver, arena, kArenaSize,
&reporter);
tflite::MicroInterpreter interpreter(model, resolver, arena, kArenaSize);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors());
TfLiteTensor* condition = interpreter.input(0);
TfLiteTensor* input1 = interpreter.input(1);
......@@ -144,12 +142,10 @@ TF_LITE_MICRO_TEST(IfShouldInvokeSubgraphConditionFalse) {
const tflite::Model* model =
tflite::testing::GetSimpleModelWithSubgraphsAndIf();
tflite::MicroMutableOpResolver<3> resolver;
tflite::MicroErrorReporter reporter;
resolver.AddIf();
resolver.AddAdd();
resolver.AddMul();
tflite::MicroInterpreter interpreter(model, resolver, arena, kArenaSize,
&reporter);
tflite::MicroInterpreter interpreter(model, resolver, arena, kArenaSize);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors());
TfLiteTensor* condition = interpreter.input(0);
TfLiteTensor* input1 = interpreter.input(1);
......@@ -176,9 +172,7 @@ TF_LITE_MICRO_TEST(IfShouldNotOverwriteTensorAcrossSubgraphs) {
tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
op_resolver.AddIf();
tflite::MicroErrorReporter reporter;
tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize,
&reporter);
tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.AllocateTensors());
TfLiteTensor* condition = interpreter.input(0);
......
......@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
#include "tensorflow/lite/micro/micro_arena_constants.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/test_helpers.h"
namespace tflite {
......@@ -39,8 +40,7 @@ KernelRunner::KernelRunner(const TfLiteRegistration& registration,
TfLiteIntArray* inputs, TfLiteIntArray* outputs,
void* builtin_data, TfLiteIntArray* intermediates)
: registration_(registration),
allocator_(SingleArenaBufferAllocator::Create(GetMicroErrorReporter(),
kKernelRunnerBuffer_,
allocator_(SingleArenaBufferAllocator::Create(kKernelRunnerBuffer_,
kKernelRunnerBufferSize_)),
mock_micro_graph_(allocator_),
fake_micro_context_(tensors, allocator_, &mock_micro_graph_) {
......
......@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace micro {
......
......@@ -24,7 +24,6 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/micro/micro_context.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
namespace tflite {
namespace micro {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/reference/l2normalization.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/leaky_relu.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/logistic.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace {
......
......@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/pooling.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/prelu.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/quantize.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -23,8 +23,8 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_graph.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_resource_variable.h"
#include "tensorflow/lite/schema/schema_generated.h"
......
......@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/reduce.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
namespace ops {
......
......@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
......@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
......
......@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
namespace tflite {
......
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册