Unverified commit 233478e6, authored by Alexander Smorkalov, committed by GitHub

Merge pull request #23600 from dkurt:flatbuffers_23.5.9

Update FlatBuffers source code to 23.5.9
Origin: https://github.com/google/flatbuffers/tree/v23.1.21
Origin: https://github.com/google/flatbuffers/tree/v23.5.9
......@@ -17,6 +17,7 @@
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include <cstdint>
#include <memory>
#include "flatbuffers/base.h"
......@@ -37,7 +38,7 @@ template<typename T, uint16_t length> class Array {
public:
typedef uint16_t size_type;
typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
typedef VectorConstIterator<T, return_type> const_iterator;
typedef VectorConstIterator<T, return_type, uoffset_t> const_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
// If T is a LE-scalar or a struct (!scalar_tag::value).
......@@ -158,11 +159,13 @@ template<typename T, uint16_t length> class Array {
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
template<typename T, uint16_t length, template<typename> class OffsetT>
class Array<OffsetT<T>, length> {
static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");
public:
typedef const void *return_type;
typedef uint16_t size_type;
const uint8_t *Data() const { return data_; }
......
......@@ -43,6 +43,7 @@
#include <vector>
#include <set>
#include <algorithm>
#include <limits>
#include <iterator>
#include <memory>
......@@ -139,8 +140,8 @@
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 23
#define FLATBUFFERS_VERSION_MINOR 1
#define FLATBUFFERS_VERSION_REVISION 21
#define FLATBUFFERS_VERSION_MINOR 5
#define FLATBUFFERS_VERSION_REVISION 9
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
......@@ -233,12 +234,17 @@ namespace flatbuffers {
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for absl::string_view
#elif __has_include("absl/strings/string_view.h")
#include "absl/strings/string_view.h"
namespace flatbuffers {
typedef absl::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
#elif __has_include("absl/strings/string_view.h") && \
__has_include("absl/base/config.h") && \
(__cplusplus >= 201411)
#include "absl/base/config.h"
#if !defined(ABSL_USES_STD_STRING_VIEW)
#include "absl/strings/string_view.h"
namespace flatbuffers {
typedef absl::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
#endif
#endif
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
......@@ -318,9 +324,11 @@ namespace flatbuffers {
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;
typedef uint64_t uoffset64_t;
// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;
typedef int64_t soffset64_t;
// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
......@@ -329,7 +337,8 @@ typedef uint16_t voffset_t;
typedef uintmax_t largest_scalar_t;
// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
#define FLATBUFFERS_MAX_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset_t>::max()
#define FLATBUFFERS_MAX_64_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset64_t>::max()
// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
......
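Reviewer note (not part of the patch): the bit-shift expression being replaced and the new std::numeric_limits form agree for the default 32-bit soffset_t, while FLATBUFFERS_MAX_64_BUFFER_SIZE extends the limit to 2^63 - 1. A standalone check of that equivalence, assuming only <cstdint> and <limits>:

#include <cstdint>
#include <limits>

// (1ULL << 31) - 1 == 2147483647 == std::numeric_limits<int32_t>::max()
static_assert(((1ULL << (sizeof(std::int32_t) * 8 - 1)) - 1) ==
                  static_cast<unsigned long long>(
                      std::numeric_limits<std::int32_t>::max()),
              "old and new 32-bit limits match (2 GiB - 1)");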
......@@ -25,14 +25,33 @@ namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
uoffset_t o;
template<typename T = void> struct Offset {
// The type of offset to use.
typedef uoffset_t offset_type;
offset_type o;
Offset() : o(0) {}
Offset(uoffset_t _o) : o(_o) {}
Offset<void> Union() const { return Offset<void>(o); }
Offset(const offset_type _o) : o(_o) {}
Offset<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Wrapper for uoffset64_t Offsets.
template<typename T = void> struct Offset64 {
// The type of offset to use.
typedef uoffset64_t offset_type;
offset_type o;
Offset64() : o(0) {}
Offset64(const offset_type offset) : o(offset) {}
Offset64<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Litmus check for ensuring the Offsets are the expected size.
static_assert(sizeof(Offset<>) == 4, "Offset has wrong size");
static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size");
inline void EndianCheck() {
int endiantest = 1;
// If this fails, see FLATBUFFERS_LITTLEENDIAN above.
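A minimal usage sketch of the Offset / Offset64 wrappers introduced above (reviewer illustration, not part of the patch; the int payload type is arbitrary):

#include <cassert>
#include "flatbuffers/flatbuffers.h"

void offset_sketch() {
  flatbuffers::Offset<> off32;    // 4-byte offset, default-constructed to 0 (null)
  flatbuffers::Offset64<> off64;  // new 8-byte offset, also null by default
  assert(off32.IsNull() && off64.IsNull());
  // Union() erases the pointee type, returning an Offset<void>.
  flatbuffers::Offset<> erased = flatbuffers::Offset<int>(16).Union();
  assert(!erased.IsNull());
}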
......@@ -75,35 +94,59 @@ template<typename T> struct IndirectHelper {
typedef T return_type;
typedef T mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
static return_type Read(const uint8_t *p, const size_t i) {
return EndianScalar((reinterpret_cast<const T *>(p))[i]);
}
static return_type Read(uint8_t *p, uoffset_t i) {
return Read(const_cast<const uint8_t *>(p), i);
static mutable_return_type Read(uint8_t *p, const size_t i) {
return reinterpret_cast<mutable_return_type>(
Read(const_cast<const uint8_t *>(p), i));
}
};
template<typename T> struct IndirectHelper<Offset<T>> {
// For vector of Offsets.
template<typename T, template<typename> class OffsetT>
struct IndirectHelper<OffsetT<T>> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(uoffset_t);
static return_type Read(const uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
typedef typename OffsetT<T>::offset_type offset_type;
static const offset_type element_stride = sizeof(offset_type);
static return_type Read(const uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
const uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
static mutable_return_type Read(uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
return reinterpret_cast<mutable_return_type>(p + ReadScalar<uoffset_t>(p));
static mutable_return_type Read(uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<mutable_return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
};
// For vector of structs.
template<typename T> struct IndirectHelper<const T *> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
return reinterpret_cast<return_type>(p + i * sizeof(T));
static return_type Read(const uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<return_type>(p + i * element_stride);
}
static mutable_return_type Read(uint8_t *p, uoffset_t i) {
return reinterpret_cast<mutable_return_type>(p + i * sizeof(T));
static mutable_return_type Read(uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<mutable_return_type>(p + i * element_stride);
}
};
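The comments above describe vector offsets as self-relative: the stored value is added to the address it was read from. A tiny standalone illustration of that rule (not FlatBuffers code; assumes a little-endian buffer):

#include <cstdint>
#include <cstring>

// Resolve a self-relative 32-bit offset: the target lives at
// (address of the stored offset) + (value of the stored offset).
inline const std::uint8_t *ResolveOffset32(const std::uint8_t *offset_location) {
  std::uint32_t rel;
  std::memcpy(&rel, offset_location, sizeof(rel));
  return offset_location + rel;
}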
......@@ -130,23 +173,25 @@ inline bool BufferHasIdentifier(const void *buf, const char *identifier,
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
if (!buf) return nullptr;
EndianCheck();
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(buf) +
EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}
template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) +
sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) + sizeof(SizeT));
}
template<typename T> const T *GetRoot(const void *buf) {
return GetMutableRoot<T>(const_cast<void *>(buf));
}
template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(SizeT));
}
} // namespace flatbuffers
......
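A hedged usage sketch of the SizeT-parameterized root getters; MyTable is a placeholder for any generated table type, and the 64-bit call assumes a buffer written with an 8-byte size prefix:

#include "flatbuffers/flatbuffers.h"

struct MyTable;  // placeholder for a generated table type

const MyTable *ReadSizePrefixed32(const void *buf) {
  // Default SizeT is uoffset_t, i.e. the classic 4-byte size prefix.
  return flatbuffers::GetSizePrefixedRoot<MyTable>(buf);
}

const MyTable *ReadSizePrefixed64(const void *buf) {
  // Skips an 8-byte size prefix instead.
  return flatbuffers::GetSizePrefixedRoot<MyTable, flatbuffers::uoffset64_t>(buf);
}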
......@@ -76,8 +76,9 @@ inline const uint8_t *GetBufferStartFromRootPointer(const void *root) {
}
/// @brief This return the prefixed size of a FlatBuffer.
inline uoffset_t GetPrefixedSize(const uint8_t *buf) {
return ReadScalar<uoffset_t>(buf);
template<typename SizeT = uoffset_t>
inline SizeT GetPrefixedSize(const uint8_t *buf) {
return ReadScalar<SizeT>(buf);
}
// Base class for native objects (FlatBuffer data de-serialized into native
......
......@@ -41,15 +41,18 @@
#include <optional>
#endif
// The __cpp_lib_span is the predefined feature macro.
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <span>
#elif defined(__cpp_lib_span) && defined(__has_include)
#if __has_include(<span>)
#include <array>
#include <span>
#define FLATBUFFERS_USE_STD_SPAN
#ifndef FLATBUFFERS_USE_STD_SPAN
// Testing __cpp_lib_span requires including either <version> or <span>,
// both of which were added in C++20.
// See: https://en.cppreference.com/w/cpp/utility/feature_test
#if defined(__cplusplus) && __cplusplus >= 202002L
#define FLATBUFFERS_USE_STD_SPAN 1
#endif
#endif // FLATBUFFERS_USE_STD_SPAN
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <array>
#include <span>
#else
// Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined.
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES)
......
......@@ -47,14 +47,24 @@ class Table {
return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
}
template<typename P> P GetPointer(voffset_t field) {
template<typename P, typename OffsetSize = uoffset_t>
P GetPointer(voffset_t field) {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
return field_offset ? reinterpret_cast<P>(p + ReadScalar<OffsetSize>(p))
: nullptr;
}
template<typename P> P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P>(field);
template<typename P, typename OffsetSize = uoffset_t>
P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P, OffsetSize>(field);
}
template<typename P> P GetPointer64(voffset_t field) {
return GetPointer<P, uoffset64_t>(field);
}
template<typename P> P GetPointer64(voffset_t field) const {
return GetPointer<P, uoffset64_t>(field);
}
template<typename P> P GetStruct(voffset_t field) const {
......@@ -131,15 +141,25 @@ class Table {
}
// Versions for offsets.
template<typename OffsetT = uoffset_t>
bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return !field_offset || verifier.VerifyOffset(data_, field_offset);
return !field_offset || verifier.VerifyOffset<OffsetT>(data_, field_offset);
}
template<typename OffsetT = uoffset_t>
bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return verifier.Check(field_offset != 0) &&
verifier.VerifyOffset(data_, field_offset);
verifier.VerifyOffset<OffsetT>(data_, field_offset);
}
bool VerifyOffset64(const Verifier &verifier, voffset_t field) const {
return VerifyOffset<uoffset64_t>(verifier, field);
}
bool VerifyOffset64Required(const Verifier &verifier, voffset_t field) const {
return VerifyOffsetRequired<uoffset64_t>(verifier, field);
}
private:
......
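Hedged sketch of how a table class might exercise the new 64-bit helpers; the field, its made-up vtable slot 4, and the class itself are hypothetical and not flatc output:

#include "flatbuffers/flatbuffers.h"

struct ExampleTable FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  // A vector field stored behind a 64-bit offset.
  const ::flatbuffers::Vector64<uint8_t> *payload() const {
    return GetPointer64<const ::flatbuffers::Vector64<uint8_t> *>(4);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset64(verifier, 4) &&
           verifier.VerifyVector(payload()) &&
           verifier.EndTable();
  }
};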
......@@ -27,7 +27,8 @@ struct String;
// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
template<typename T, typename IT, typename Data = uint8_t *>
template<typename T, typename IT, typename Data = uint8_t *,
typename SizeT = uoffset_t>
struct VectorIterator {
typedef std::random_access_iterator_tag iterator_category;
typedef IT value_type;
......@@ -35,8 +36,9 @@ struct VectorIterator {
typedef IT *pointer;
typedef IT &reference;
VectorIterator(Data data, uoffset_t i)
: data_(data + IndirectHelper<T>::element_stride * i) {}
static const SizeT element_stride = IndirectHelper<T>::element_stride;
VectorIterator(Data data, SizeT i) : data_(data + element_stride * i) {}
VectorIterator(const VectorIterator &other) : data_(other.data_) {}
VectorIterator() : data_(nullptr) {}
......@@ -63,7 +65,7 @@ struct VectorIterator {
}
difference_type operator-(const VectorIterator &other) const {
return (data_ - other.data_) / IndirectHelper<T>::element_stride;
return (data_ - other.data_) / element_stride;
}
// Note: return type is incompatible with the standard
......@@ -75,44 +77,42 @@ struct VectorIterator {
IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }
VectorIterator &operator++() {
data_ += IndirectHelper<T>::element_stride;
data_ += element_stride;
return *this;
}
VectorIterator operator++(int) {
VectorIterator temp(data_, 0);
data_ += IndirectHelper<T>::element_stride;
data_ += element_stride;
return temp;
}
VectorIterator operator+(const uoffset_t &offset) const {
return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
0);
VectorIterator operator+(const SizeT &offset) const {
return VectorIterator(data_ + offset * element_stride, 0);
}
VectorIterator &operator+=(const uoffset_t &offset) {
data_ += offset * IndirectHelper<T>::element_stride;
VectorIterator &operator+=(const SizeT &offset) {
data_ += offset * element_stride;
return *this;
}
VectorIterator &operator--() {
data_ -= IndirectHelper<T>::element_stride;
data_ -= element_stride;
return *this;
}
VectorIterator operator--(int) {
VectorIterator temp(data_, 0);
data_ -= IndirectHelper<T>::element_stride;
data_ -= element_stride;
return temp;
}
VectorIterator operator-(const uoffset_t &offset) const {
return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
0);
VectorIterator operator-(const SizeT &offset) const {
return VectorIterator(data_ - offset * element_stride, 0);
}
VectorIterator &operator-=(const uoffset_t &offset) {
data_ -= offset * IndirectHelper<T>::element_stride;
VectorIterator &operator-=(const SizeT &offset) {
data_ -= offset * element_stride;
return *this;
}
......@@ -120,8 +120,8 @@ struct VectorIterator {
Data data_;
};
template<typename T, typename IT>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *>;
template<typename T, typename IT, typename SizeT = uoffset_t>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *, SizeT>;
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
......@@ -145,11 +145,14 @@ struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
template<typename T> class Vector {
template<typename T, typename SizeT = uoffset_t> class Vector {
public:
typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
typedef VectorIterator<T,
typename IndirectHelper<T>::mutable_return_type,
uint8_t *, SizeT>
iterator;
typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type>
typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type,
SizeT>
const_iterator;
typedef VectorReverseIterator<iterator> reverse_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
......@@ -160,39 +163,41 @@ template<typename T> class Vector {
static FLATBUFFERS_CONSTEXPR bool is_span_observable =
scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);
uoffset_t size() const { return EndianScalar(length_); }
SizeT size() const { return EndianScalar(length_); }
// Deprecated: use size(). Here for backwards compatibility.
FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
uoffset_t Length() const { return size(); }
SizeT Length() const { return size(); }
typedef SizeT size_type;
typedef typename IndirectHelper<T>::return_type return_type;
typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
typedef typename IndirectHelper<T>::mutable_return_type
mutable_return_type;
typedef return_type value_type;
return_type Get(uoffset_t i) const {
return_type Get(SizeT i) const {
FLATBUFFERS_ASSERT(i < size());
return IndirectHelper<T>::Read(Data(), i);
}
return_type operator[](uoffset_t i) const { return Get(i); }
return_type operator[](SizeT i) const { return Get(i); }
// If this is a Vector of enums, T will be its storage type, not the enum
// type. This function makes it convenient to retrieve value with enum
// type E.
template<typename E> E GetEnum(uoffset_t i) const {
template<typename E> E GetEnum(SizeT i) const {
return static_cast<E>(Get(i));
}
// If this a vector of unions, this does the cast for you. There's no check
// to make sure this is the right type!
template<typename U> const U *GetAs(uoffset_t i) const {
template<typename U> const U *GetAs(SizeT i) const {
return reinterpret_cast<const U *>(Get(i));
}
// If this a vector of unions, this does the cast for you. There's no check
// to make sure this is actually a string!
const String *GetAsString(uoffset_t i) const {
const String *GetAsString(SizeT i) const {
return reinterpret_cast<const String *>(Get(i));
}
......@@ -226,7 +231,7 @@ template<typename T> class Vector {
// Change elements if you have a non-const pointer to this object.
// Scalars only. See reflection.h, and the documentation.
void Mutate(uoffset_t i, const T &val) {
void Mutate(SizeT i, const T &val) {
FLATBUFFERS_ASSERT(i < size());
WriteScalar(data() + i, val);
}
......@@ -234,15 +239,15 @@ template<typename T> class Vector {
// Change an element of a vector of tables (or strings).
// "val" points to the new table/string, as you can obtain from
// e.g. reflection::AddFlatBuffer().
void MutateOffset(uoffset_t i, const uint8_t *val) {
void MutateOffset(SizeT i, const uint8_t *val) {
FLATBUFFERS_ASSERT(i < size());
static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
static_assert(sizeof(T) == sizeof(SizeT), "Unrelated types");
WriteScalar(data() + i,
static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
static_cast<SizeT>(val - (Data() + i * sizeof(SizeT))));
}
// Get a mutable pointer to tables/strings inside this vector.
mutable_return_type GetMutableObject(uoffset_t i) const {
mutable_return_type GetMutableObject(SizeT i) const {
FLATBUFFERS_ASSERT(i < size());
return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
}
......@@ -280,7 +285,7 @@ template<typename T> class Vector {
// try to construct these manually.
Vector();
uoffset_t length_;
SizeT length_;
private:
// This class is a pointer. Copying will therefore create an invalid object.
......@@ -299,6 +304,8 @@ template<typename T> class Vector {
}
};
template<typename T> using Vector64 = Vector<T, uoffset64_t>;
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
FLATBUFFERS_NOEXCEPT {
......
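A small consumer-side sketch of the new Vector64 alias (the vec pointer is assumed to come from an accessor like the one sketched earlier); aside from the 64-bit length and offsets it behaves like Vector:

#include <cstdint>
#include "flatbuffers/flatbuffers.h"

std::uint64_t SumBytes(const flatbuffers::Vector64<std::uint8_t> *vec) {
  std::uint64_t total = 0;
  // size() returns the vector's SizeT, which is uoffset64_t here.
  for (flatbuffers::uoffset64_t i = 0; i < vec->size(); ++i) total += vec->Get(i);
  return total;
}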
......@@ -17,6 +17,8 @@
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
#include <cstdint>
#include <algorithm>
#include "flatbuffers/base.h"
......@@ -31,13 +33,15 @@ namespace flatbuffers {
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
template<typename SizeT = uoffset_t> class vector_downward {
public:
explicit vector_downward(size_t initial_size, Allocator *allocator,
bool own_allocator, size_t buffer_minalign)
bool own_allocator, size_t buffer_minalign,
const SizeT max_size = FLATBUFFERS_MAX_BUFFER_SIZE)
: allocator_(allocator),
own_allocator_(own_allocator),
initial_size_(initial_size),
max_size_(max_size),
buffer_minalign_(buffer_minalign),
reserved_(0),
size_(0),
......@@ -50,6 +54,7 @@ class vector_downward {
: allocator_(other.allocator_),
own_allocator_(other.own_allocator_),
initial_size_(other.initial_size_),
max_size_(other.max_size_),
buffer_minalign_(other.buffer_minalign_),
reserved_(other.reserved_),
size_(other.size_),
......@@ -111,7 +116,7 @@ class vector_downward {
uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
auto *buf = buf_;
allocated_bytes = reserved_;
offset = static_cast<size_t>(cur_ - buf_);
offset = vector_downward::offset();
// release_raw only relinquishes the buffer ownership.
// Does not deallocate or reset the allocator. Destructor will do that.
......@@ -136,10 +141,10 @@ class vector_downward {
size_t ensure_space(size_t len) {
FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
// Beyond this, signed offsets may not have enough range:
// (FlatBuffers > 2GB not supported).
FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
// If the length is larger than the unused part of the buffer, we need to
// grow.
if (len > unused_buffer_size()) { reallocate(len); }
FLATBUFFERS_ASSERT(size() < max_size_);
return len;
}
......@@ -147,7 +152,7 @@ class vector_downward {
if (len) {
ensure_space(len);
cur_ -= len;
size_ += static_cast<uoffset_t>(len);
size_ += static_cast<SizeT>(len);
}
return cur_;
}
......@@ -155,11 +160,17 @@ class vector_downward {
// Returns nullptr if using the DefaultAllocator.
Allocator *get_custom_allocator() { return allocator_; }
inline uoffset_t size() const { return size_; }
// The current offset into the buffer.
size_t offset() const { return cur_ - buf_; }
uoffset_t scratch_size() const {
return static_cast<uoffset_t>(scratch_ - buf_);
}
// The total size of the vector (both the buffer and scratch parts).
inline SizeT size() const { return size_; }
// The size of the buffer part of the vector that is currently unused.
SizeT unused_buffer_size() const { return static_cast<SizeT>(cur_ - scratch_); }
// The size of the scratch part of the vector.
SizeT scratch_size() const { return static_cast<SizeT>(scratch_ - buf_); }
size_t capacity() const { return reserved_; }
......@@ -211,7 +222,7 @@ class vector_downward {
void pop(size_t bytes_to_remove) {
cur_ += bytes_to_remove;
size_ -= static_cast<uoffset_t>(bytes_to_remove);
size_ -= static_cast<SizeT>(bytes_to_remove);
}
void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
......@@ -224,6 +235,7 @@ class vector_downward {
swap(buffer_minalign_, other.buffer_minalign_);
swap(reserved_, other.reserved_);
swap(size_, other.size_);
swap(max_size_, other.max_size_);
swap(buf_, other.buf_);
swap(cur_, other.cur_);
swap(scratch_, other.scratch_);
......@@ -243,9 +255,12 @@ class vector_downward {
Allocator *allocator_;
bool own_allocator_;
size_t initial_size_;
// The maximum size the vector can be.
SizeT max_size_;
size_t buffer_minalign_;
size_t reserved_;
uoffset_t size_;
SizeT size_;
uint8_t *buf_;
uint8_t *cur_; // Points at location between empty (below) and used (above).
uint8_t *scratch_; // Points to the end of the scratchpad in use.
......
......@@ -34,12 +34,16 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
bool check_alignment = true;
// If true, run verifier on nested flatbuffers
bool check_nested_flatbuffers = true;
// The maximum size of a buffer.
size_t max_size = FLATBUFFERS_MAX_BUFFER_SIZE;
// Use assertions to check for errors.
bool assert = false;
};
explicit Verifier(const uint8_t *const buf, const size_t buf_len,
const Options &opts)
: buf_(buf), size_(buf_len), opts_(opts) {
FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
FLATBUFFERS_ASSERT(size_ < opts.max_size);
}
// Deprecated API, please construct with Verifier::Options.
......@@ -58,7 +62,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
bool Check(const bool ok) const {
// clang-format off
#ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
FLATBUFFERS_ASSERT(ok);
if (opts_.assert) { FLATBUFFERS_ASSERT(ok); }
#endif
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
if (!ok)
......@@ -113,41 +117,43 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
}
// Verify a pointer (may be NULL) of any vector type.
template<typename T> bool VerifyVector(const Vector<T> *const vec) const {
return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
sizeof(T));
template<int &..., typename T, typename LenT>
bool VerifyVector(const Vector<T, LenT> *const vec) const {
return !vec || VerifyVectorOrString<LenT>(
reinterpret_cast<const uint8_t *>(vec), sizeof(T));
}
// Verify a pointer (may be NULL) of a vector to struct.
template<typename T>
bool VerifyVector(const Vector<const T *> *const vec) const {
return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
template<int &..., typename T, typename LenT>
bool VerifyVector(const Vector<const T *, LenT> *const vec) const {
return VerifyVector(reinterpret_cast<const Vector<T, LenT> *>(vec));
}
// Verify a pointer (may be NULL) to string.
bool VerifyString(const String *const str) const {
size_t end;
return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
1, &end) &&
return !str || (VerifyVectorOrString<uoffset_t>(
reinterpret_cast<const uint8_t *>(str), 1, &end) &&
Verify(end, 1) && // Must have terminator
Check(buf_[end] == '\0')); // Terminating byte must be 0.
}
// Common code between vectors and strings.
template<typename LenT = uoffset_t>
bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size,
size_t *const end = nullptr) const {
const auto veco = static_cast<size_t>(vec - buf_);
const auto vec_offset = static_cast<size_t>(vec - buf_);
// Check we can read the size field.
if (!Verify<uoffset_t>(veco)) return false;
if (!Verify<LenT>(vec_offset)) return false;
// Check the whole array. If this is a string, the byte past the array must
// be 0.
const auto size = ReadScalar<uoffset_t>(vec);
const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
const LenT size = ReadScalar<LenT>(vec);
const auto max_elems = opts_.max_size / elem_size;
if (!Check(size < max_elems))
return false; // Protect against byte_size overflowing.
const auto byte_size = sizeof(size) + elem_size * size;
if (end) *end = veco + byte_size;
return Verify(veco, byte_size);
const auto byte_size = sizeof(LenT) + elem_size * size;
if (end) *end = vec_offset + byte_size;
return Verify(vec_offset, byte_size);
}
// Special case for string contents, after the above has been called.
......@@ -203,7 +209,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
}
// Call T::Verify, which must be in the generated code for this type.
const auto o = VerifyOffset(start);
const auto o = VerifyOffset<uoffset_t>(start);
return Check(o != 0) &&
reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
// clang-format off
......@@ -214,8 +220,8 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
// clang-format on
}
template<typename T>
bool VerifyNestedFlatBuffer(const Vector<uint8_t> *const buf,
template<typename T, int &..., typename SizeT>
bool VerifyNestedFlatBuffer(const Vector<uint8_t, SizeT> *const buf,
const char *const identifier) {
// Caller opted out of this.
if (!opts_.check_nested_flatbuffers) return true;
......@@ -226,7 +232,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
// If there is a nested buffer, it must be greater than the min size.
if (!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
Verifier nested_verifier(buf->data(), buf->size());
Verifier nested_verifier(buf->data(), buf->size(), opts_);
return nested_verifier.VerifyBuffer<T>(identifier);
}
......@@ -237,29 +243,30 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
return VerifyBufferFromStart<T>(identifier, 0);
}
template<typename T>
template<typename T, typename SizeT = uoffset_t>
bool VerifySizePrefixedBuffer(const char *const identifier) {
return Verify<uoffset_t>(0U) &&
Check(ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t)) &&
VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
return Verify<SizeT>(0U) &&
Check(ReadScalar<SizeT>(buf_) == size_ - sizeof(SizeT)) &&
VerifyBufferFromStart<T>(identifier, sizeof(SizeT));
}
uoffset_t VerifyOffset(const size_t start) const {
if (!Verify<uoffset_t>(start)) return 0;
const auto o = ReadScalar<uoffset_t>(buf_ + start);
template<typename OffsetT = uoffset_t, typename SOffsetT = soffset_t>
size_t VerifyOffset(const size_t start) const {
if (!Verify<OffsetT>(start)) return 0;
const auto o = ReadScalar<OffsetT>(buf_ + start);
// May not point to itself.
if (!Check(o != 0)) return 0;
// Can't wrap around / buffers are max 2GB.
if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
// Can't wrap around larger than the max size.
if (!Check(static_cast<SOffsetT>(o) >= 0)) return 0;
// Must be inside the buffer to create a pointer from it (pointer outside
// buffer is UB).
if (!Verify(start + o, 1)) return 0;
return o;
}
uoffset_t VerifyOffset(const uint8_t *const base,
const voffset_t start) const {
return VerifyOffset(static_cast<size_t>(base - buf_) + start);
template<typename OffsetT = uoffset_t>
size_t VerifyOffset(const uint8_t *const base, const voffset_t start) const {
return VerifyOffset<OffsetT>(static_cast<size_t>(base - buf_) + start);
}
// Called at the start of a table to increase counters measuring data
......@@ -312,6 +319,12 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
std::vector<uint8_t> *flex_reuse_tracker_ = nullptr;
};
// Specialization for 64-bit offsets.
template<>
inline size_t Verifier::VerifyOffset<uoffset64_t>(const size_t start) const {
return VerifyOffset<uoffset64_t, soffset64_t>(start);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_VERIFIER_H_
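A hedged end-to-end verification sketch using the new Options::max_size knob and the SizeT parameter on VerifySizePrefixedBuffer; RootTable is a stand-in for a generated type and only verifies an empty vtable here:

#include <cstddef>
#include <cstdint>
#include "flatbuffers/flatbuffers.h"

// Minimal stand-in with the Verify() hook the verifier expects on a root type.
struct RootTable : private ::flatbuffers::Table {
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

bool VerifySizePrefixed64(const std::uint8_t *data, std::size_t len) {
  ::flatbuffers::Verifier::Options opts;
  // Allow buffers past the 2 GiB default limit (meaningful on 64-bit hosts).
  opts.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;
  ::flatbuffers::Verifier verifier(data, len, opts);
  // An 8-byte size prefix precedes the root offset in this layout.
  return verifier.VerifySizePrefixedBuffer<RootTable, ::flatbuffers::uoffset64_t>(nullptr);
}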
if(WITH_FLATBUFFERS)
set(HAVE_FLATBUFFERS 1)
set(flatbuffers_VERSION "23.1.21")
set(flatbuffers_VERSION "23.5.9")
ocv_install_3rdparty_licenses(flatbuffers "${OpenCV_SOURCE_DIR}/3rdparty/flatbuffers/LICENSE.txt")
ocv_add_external_target(flatbuffers "${OpenCV_SOURCE_DIR}/3rdparty/flatbuffers/include" "" "HAVE_FLATBUFFERS=1")
set(CUSTOM_STATUS_flatbuffers " Flatbuffers:" "builtin/3rdparty (${flatbuffers_VERSION})")
......
......@@ -9,8 +9,8 @@
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
FLATBUFFERS_VERSION_MINOR == 1 &&
FLATBUFFERS_VERSION_REVISION == 21,
FLATBUFFERS_VERSION_MINOR == 5 &&
FLATBUFFERS_VERSION_REVISION == 9,
"Non-compatible flatbuffers version included");
namespace opencv_tflite {
......