Unverified commit f868a900 authored by psharath1, committed by GitHub

Added Support for Reshape int8 on Vision P6 (#1186)

* Added Support for Reshape int8 on Vision P6

* Use C++ style comments

Co-authored-by: Ting Yan <94130036+tingyan19@users.noreply.github.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Parent f8ef5389
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <cstring>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
#include "tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h"
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_utils.h"
namespace tflite {
namespace ops {
namespace micro {
namespace reshape {
#if defined(VISION_P6)
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
void* data =
context->AllocatePersistentBuffer(context, sizeof(XtensaReshapeData));
if (InitXtensaContext()) {
return nullptr;
}
return data;
}
#endif // defined(VISION_P6)
TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) {
MicroContext* micro_context = GetMicroContext(context);
TfLiteTensor* input =
micro_context->AllocateTempInputTensor(node, kReshapeInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output =
micro_context->AllocateTempOutputTensor(node, kReshapeOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
// TensorFlow's Reshape allows one of the shape components to have the
// special -1 value, meaning it will be calculated automatically based on the
// input. Here we calculate what that dimension should be so that the number
// of output elements is the same as the number of input elements.
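// For example (illustrative values): a 2x3x4 input (24 elements) reshaped
// with a requested shape of {4, -1} resolves the -1 to 24 / 4 = 6, giving an
// output shape of {4, 6}.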
int num_input_elements = NumElements(input);
TfLiteIntArray* output_shape = output->dims;
if (NumInputs(node) == 1 && // Legacy scalar supported with params.
output_shape->size == 1 && output_shape->data[0] == 0) {
// Legacy tflite models use a shape parameter of [0] to indicate scalars,
// so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during
// toco conversion.
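// For example (illustrative): a stored shape of [0] (one dimension of size 0)
// is collapsed to a 0-D scalar shape here.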
output_shape->size = 0;
}
int num_output_elements = 1;
int stretch_dim = -1;
for (int i = 0; i < output_shape->size; ++i) {
int value = output_shape->data[i];
if (value == -1) {
TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
stretch_dim = i;
} else {
num_output_elements *= value;
}
}
if (stretch_dim != -1) {
output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
num_output_elements *= output_shape->data[stretch_dim];
}
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
micro_context->DeallocateTempTfLiteTensor(input);
micro_context->DeallocateTempTfLiteTensor(output);
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_EQ(context, ReshapeOutput(context, node), kTfLiteOk);
#if defined(VISION_P6)
{
MicroContext* micro_context = GetMicroContext(context);
TfLiteTensor* input =
micro_context->AllocateTempInputTensor(node, kReshapeInputTensor);
// Vision P6 currently only supports up to 4D int8 input tensors
if (NumDimensions(input) <= 4 && input->type == kTfLiteInt8) {
TF_LITE_ENSURE_OK(context, ReshapePrepareVision(context, node));
}
micro_context->DeallocateTempTfLiteTensor(input);
}
#endif // VISION_P6
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteEvalTensor* input =
tflite::micro::GetEvalInput(context, node, kReshapeInputTensor);
TfLiteEvalTensor* output =
tflite::micro::GetEvalOutput(context, node, kReshapeOutputTensor);
// TODO(b/162522304): storing input bytes in OpData increases the size of some
// models significantly, possibly due to alignment issues.
size_t input_bytes;
TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(input->type, &input_bytes));
input_bytes *= ElementCount(*input->dims);
// Do nothing for in-place reshape.
if (input->data.raw != output->data.raw) {
// Otherwise perform reshape with copy.
#if defined(VISION_P6)
// Vision P6 currently only supports up to 4D int8 input tensors
if (tflite::micro::GetTensorShape(input).DimensionsCount() <= 4 &&
input->type == kTfLiteInt8) {
XtensaReshapeData* op_data_xtensa =
static_cast<XtensaReshapeData*>(node->user_data);
ReshapeEvalVision(*op_data_xtensa, input, output);
} else {
#endif // VISION_P6
memcpy(output->data.raw, input->data.raw, input_bytes);
#if defined(VISION_P6)
}
#endif // VISION_P6
}
return kTfLiteOk;
}
} // namespace reshape
TfLiteRegistration Register_RESHAPE() {
#if defined(VISION_P6)
return tflite::micro::RegisterOp(reshape::Init, reshape::Prepare,
reshape::Eval);
#else
return tflite::micro::RegisterOp(nullptr, reshape::Prepare, reshape::Eval);
#endif
}
} // namespace micro
} // namespace ops
} // namespace tflite
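(For context, a minimal sketch of how an application would typically pull in this kernel via the standard TFLM MicroMutableOpResolver API; the op count of 1 is a placeholder and nothing below is part of this commit. On a VISION_P6 build the makefile change further down compiles this file in place of the reference kernel, so AddReshape() resolves to the registration above.)

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Registers the RESHAPE op with an application-side resolver.
TfLiteStatus RegisterReshapeOp(tflite::MicroMutableOpResolver<1>& resolver) {
  return resolver.AddReshape();
}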
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#if defined(VISION_P6)
#include <cstdint>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/reduce.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/xtensa/xtensa.h"
#include "tensorflow/lite/micro/kernels/xtensa/xtensa_reshape.h"
namespace tflite {
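// Copies a tensor's dimensions into a fixed-size 4D array in reverse
// (innermost-first) order; unused trailing entries keep the caller-supplied
// default. E.g. (illustrative) a {2, 3, 4} input with dims initialized to
// {1, 1, 1, 1} yields {4, 3, 2, 1}.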
inline void OperandDims4D(uint32_t* dims, TfLiteTensor* opnd) {
for (int i = NumDimensions(opnd) - 1, j = 0; i >= 0; i--, j++) {
dims[j] = SizeOfDimension(opnd, i);
}
}
TfLiteStatus ReshapePrepareVision(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
XtensaReshapeData* data =
reinterpret_cast<XtensaReshapeData*>(node->user_data);
MicroContext* micro_context = GetMicroContext(context);
TfLiteTensor* input =
micro_context->AllocateTempInputTensor(node, kReshapeInputTensor);
uint32_t inputRank = NumDimensions(input);
uint32_t inputDims[4] = {1, 1, 1, 1};
OperandDims4D(inputDims, input);
uint32_t context_size = 0;
uint32_t status = xiReshapeGetMemReqd_Context(&context_size);
TFLITE_DCHECK(status == 0);
if (context_size) {
void* context_data =
context->AllocatePersistentBuffer(context, context_size);
if (context_data == nullptr) {
return kTfLiteError;
}
data->p_context = reinterpret_cast<uint8_t*>(context_data);
data->context_size = context_size;
}
status = xiReshapeSetContext(data->p_context, data->context_size, inputDims,
inputRank);
if (status) {
return kTfLiteError;
}
micro_context->DeallocateTempTfLiteTensor(input);
return kTfLiteOk;
}
TfLiteStatus ReshapeEvalVision(const XtensaReshapeData& data,
const TfLiteEvalTensor* input,
TfLiteEvalTensor* output) {
const uint32_t input_size = NumElements(input->dims);
const uint32_t output_size = NumElements(output->dims);
xiReshape(data.p_context, data.context_size,
const_cast<int8_t*>(tflite::micro::GetTensorData<int8_t>(input)),
input_size, tflite::micro::GetTensorData<int8_t>(output),
output_size);
return kTfLiteOk;
}
} // namespace tflite
#endif // defined(VISION_P6)
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_XTENSA_RESHAPE_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_XTENSA_RESHAPE_H_
#include <cstdint>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
constexpr int kReshapeInputTensor = 0;
constexpr int kReshapeOutputTensor = 0;
#if defined(VISION_P6)
struct XtensaReshapeData {
uint8_t* p_context;  // Persistent library context for this op instance.
uint32_t context_size;
};
#endif // VISION_P6
#if defined(VISION_P6)
TfLiteStatus ReshapePrepareVision(TfLiteContext* context, TfLiteNode* node);
TfLiteStatus ReshapeEvalVision(const XtensaReshapeData& data,
const TfLiteEvalTensor* input,
TfLiteEvalTensor* output);
#endif // VISION_P6
} // namespace tflite
#endif // TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_XTENSA_RESHAPE_H_
@@ -12,6 +12,7 @@ MICROLITE_CC_KERNEL_SRCS += \
   tensorflow/lite/micro/kernels/xtensa/pad_vision.cc \
   tensorflow/lite/micro/kernels/xtensa/pooling_vision.cc \
   tensorflow/lite/micro/kernels/xtensa/reduce_vision.cc \
+  tensorflow/lite/micro/kernels/xtensa/reshape_vision.cc \
   tensorflow/lite/micro/kernels/xtensa/softmax_int8_int16.cc \
   tensorflow/lite/micro/kernels/xtensa/softmax_vision.cc
@@ -55,9 +55,9 @@ elif [[ ${2} == "hifi5" ]]; then
   LIBRARY_DIRNAME="xa_nnlib_hifi5"
   LIBRARY_MD5="0c832b15d27ac557fa5453c902c5662a"
 elif [[ ${2} == "vision_p6" ]]; then
-  LIBRARY_URL="https://github.com/foss-xtensa/tflmlib_vision/raw/main/archive/xi_tflmlib_vision_p6_22_06_03.zip"
+  LIBRARY_URL="https://github.com/foss-xtensa/tflmlib_vision/raw/main/archive/xi_tflmlib_vision_p6_22_06_07.zip"
   LIBRARY_DIRNAME="xi_tflmlib_vision_p6"
-  LIBRARY_MD5="cfa38ebc7d44e050708d984347a46b20"
+  LIBRARY_MD5="f5a0ea25f8c1b8073990725e4141378c"
 else
   echo "Attempting to download an unsupported xtensa variant: ${2}"
   exit 1