diff --git a/model_zoo/official/lite/.gitignore b/model_zoo/official/lite/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..59c69550a370c116e608d5ba7d297de65d7ed2df --- /dev/null +++ b/model_zoo/official/lite/.gitignore @@ -0,0 +1,81 @@ +# MindSpore +build/ +mindspore/lib +output +*.ir +mindspore/ccsrc/schema/inner/* + +# Cmake files +CMakeFiles/ +cmake_install.cmake +CMakeCache.txt +Makefile +cmake-build-debug + +# Dynamic libraries +*.so +*.so.* +*.dylib + +# Static libraries +*.la +*.lai +*.a +*.lib + +# Protocol buffers +*_pb2.py +*.pb.h +*.pb.cc + +# Object files +*.o + +# Editor +.vscode +.idea/ + +# Cquery +.cquery_cached_index/ +compile_commands.json + +# Ctags and cscope +tags +TAGS +CTAGS +GTAGS +GRTAGS +GSYMS +GPATH +cscope.* + +# Python files +*__pycache__* +.pytest_cache + +# Mac files +*.DS_Store + +# Test results +test_temp_summary_event_file/ +*.dot +*.dat +*.svg +*.perf +*.info +*.ckpt +*.shp +*.pkl +.clangd +mindspore/version.py +mindspore/default_config.py +mindspore/.commit_id +onnx.proto +mindspore/ccsrc/onnx.proto + +# Android +local.properties +.gradle +sdk/build +sdk/.cxx +app/.cxx diff --git a/model_zoo/official/lite/README.md b/model_zoo/official/lite/README.md new file mode 100644 index 0000000000000000000000000000000000000000..71312f7d131576326062792e5770810b34d04353 --- /dev/null +++ b/model_zoo/official/lite/README.md @@ -0,0 +1,275 @@ +## MindSpore Lite 端侧图像分类demo(Android) + +本示例程序演示了如何在端侧利用MindSpore Lite C++ API(Android JNI)以及MindSpore Lite 图像分类模型完成端侧推理,实现对设备摄像头捕获的内容进行分类,并在App图像预览界面中显示出最可能的分类结果。 + + +### 运行依赖 + +- Android Studio >= 3.2 (推荐4.0以上版本) +- NDK 21.3 +- CMake 3.10 +- Android SDK >= 26 +- OpenCV >= 4.0.0 + +### 构建与运行 + +1. 
在Android Studio中加载本示例源码,并安装相应的SDK(指定SDK版本后,由Android Studio自动安装)。 + + ![start_home](images/home.png) + + 启动Android Studio后,点击`File->Settings->System Settings->Android SDK`,勾选相应的SDK。如下图所示,勾选后,点击`OK`,Android Studio即可自动安装SDK。 + + ![start_sdk](images/sdk_management.png) + + (可选)若安装时出现NDK版本问题,可手动下载相应的[NDK版本](https://developer.android.com/ndk/downloads?hl=zh-cn)(本示例代码使用的NDK版本为21.3),并在`Project Structure`的`Android NDK location`设置中指定SDK的位置。 + + ![project_structure](images/project_structure.png) + +2. 连接Android设备,运行图像分类应用程序。 + + 通过USB连接Android设备调试,点击`Run 'app'`即可在您的设备上运行本示例项目。 + + * 注:编译过程中Android Studio会自动下载MindSpore Lite、OpenCV、模型文件等相关依赖项,编译过程需做耐心等待。 + + ![run_app](images/run_app.PNG) + + Android Studio连接设备调试操作,可参考。 + +3. 在Android设备上,点击“继续安装”,安装完即可查看到设备摄像头捕获的内容和推理结果。 + + ![install](images/install.jpg) + + 如下图所示,识别出的概率最高的物体是植物。 + + ![result](images/app_result.jpg) + + +## 示例程序详细说明 + +本端侧图像分类Android示例程序分为JAVA层和JNI层,其中,JAVA层主要通过Android Camera 2 API实现摄像头获取图像帧,以及相应的图像处理等功能;JNI层在[Runtime](https://www.mindspore.cn/tutorial/zh-CN/master/use/lite_runtime.html)中完成模型推理的过程。 + +> 此处详细说明示例程序的JNI层实现,JAVA层运用Android Camera 2 API实现开启设备摄像头以及图像帧处理等功能,需读者具备一定的Android开发基础知识。 + +### 示例程序结构 + +``` +app +| +├── libs # 存放demo jni层依赖的库文件 +│ └── arm64-v8a +│ ├── libopencv_java4.so # opencv +│ ├── libmlkit-label-MS.so # ndk编译生成的库文件 +│ └── libmindspore-lite.so # mindspore lite +| +├── src/main +│ ├── assets # 资源文件 +| | └── mobilenetv2.ms # 存放模型文件 +│ | +│ ├── cpp # 模型加载和预测主要逻辑封装类 +| | ├── include # 存放MindSpore调用相关的头文件 +| | | └── ... +│ | | +| | ├── MindSporeNetnative.cpp # MindSpore调用相关的JNI方法 +│ | └── MindSporeNetnative.h # 头文件 +│ | +│ ├── java # java层应用代码 +│ │ └── com.huawei.himindsporedemo +│ │ ├── gallery.classify # 图像处理及MindSpore JNI调用相关实现 +│ │ │ └── ... +│ │ └── obejctdetect # 开启摄像头及绘制相关实现 +│ │ └── ... 
+│ │ +│ ├── res # 存放Android相关的资源文件 +│ └── AndroidManifest.xml # Android配置文件 +│ +├── CMakeList.txt # cmake编译入口文件 +│ +├── build.gradle # 其他Android配置文件 +├── download.gradle # APP构建时由gradle自动从HuaWei Server下载依赖的库文件及模型文件 +└── ... +``` + +### 配置MindSpore Lite依赖项 + +Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite[源码编译](https://www.mindspore.cn/lite/docs/zh-CN/master/deploy.html)生成`libmindspore-lite.so`库文件,或直接下载MindSpore Lite提供的已编译完成的AMR64、ARM32、x86等[软件包](#TODO)。 + +在Android Studio中将编译完成的`libmindspore-lite.so`库文件(可包含多个兼容架构),分别放置在APP工程的`app/libs/arm64-v8a`(ARM64)或`app/libs/armeabi-v7a`(ARM32)目录下,并在应用的`build.gradle`文件中配置CMake编译支持,以及`arm64-v8a`和`armeabi-v7a`的编译支持。 + +本示例中,build过程由download.gradle文件自动从华为服务器下载libmindspore-lite.so以及OpenCV的libopencv_java4.so库文件,并放置在`app/libs/arm64-v8a`目录下。 + +* 注:若自动下载失败,请手动下载相关库文件并将其放在对应位置: +* libmindspore-lite.so [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) +* libopencv_java4.so [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/libopencv_java4.so) + +``` +android{ + defaultConfig{ + externalNativeBuild{ + cmake{ + arguments "-DANDROID_STL=c++_shared" + } + } + + ndk{ + abiFilters 'arm64-v8a' + } + } +} +``` + +在`app/CMakeLists.txt`文件中建立`.so`库文件链接,如下所示。 + +``` +# Set MindSpore Lite Dependencies. +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore) +add_library(mindspore-lite SHARED IMPORTED ) +set_target_properties(mindspore-lite PROPERTIES + IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libmindspore-lite.so") + +# Set OpenCV Dependecies. +include_directories(${CMAKE_SOURCE_DIR}/opencv/sdk/native/jni/include) +add_library(lib-opencv SHARED IMPORTED ) +set_target_properties(lib-opencv PROPERTIES + IMPORTED_LOCATION "${CMAKE_SOURCE_DIR}/libs/libopencv_java4.so") + +# Link target library. +target_link_libraries( + ... + mindspore-lite + lib-opencv + ... 
+) +``` + +### 下载及部署模型文件 + +从MindSpore Model Hub中下载模型文件,本示例程序中使用的终端图像分类模型文件为`mobilenetv2.ms`,同样通过download.gradle脚本在APP构建时自动下载,并放置在`app/src/main/assets`工程目录下。 + +* 注:若下载失败请手动下载模型文件,mobilenetv2.ms [下载链接](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ms)。 + + +### 编写端侧推理代码 + +在JNI层调用MindSpore Lite C++ API实现端测推理。 + +推理代码流程如下,完整代码请参见`src/cpp/MindSporeNetnative.cpp`。 + +1. 加载MindSpore Lite模型文件,构建上下文、会话以及用于推理的计算图。 + + - 加载模型文件:创建并配置用于模型推理的上下文 + ```cpp + // Buffer is the model data passed in by the Java layer + jlong bufferLen = env->GetDirectBufferCapacity(buffer); + char *modelBuffer = CreateLocalModelBuffer(env, buffer); + ``` + + - 创建会话 + ```cpp + void **labelEnv = new void *; + MSNetWork *labelNet = new MSNetWork; + *labelEnv = labelNet; + + // Create context. + lite::Context *context = new lite::Context; + context->thread_num_ = numThread; //Specify the number of threads to run inference + + // Create the mindspore session. + labelNet->CreateSessionMS(modelBuffer, bufferLen, context); + delete(context); + + ``` + + - 加载模型文件并构建用于推理的计算图 + ```cpp + void MSNetWork::CreateSessionMS(char* modelBuffer, size_t bufferLen, mindspore::lite::Context* ctx) + { + CreateSession(modelBuffer, bufferLen, ctx); + session = mindspore::session::LiteSession::CreateSession(ctx); + auto model = mindspore::lite::Model::Import(modelBuffer, bufferLen); + int ret = session->CompileGraph(model); // Compile Graph + } + ``` + +2. 将输入图片转换为传入MindSpore模型的Tensor格式。 + + 将待检测图片数据转换为输入MindSpore模型的Tensor。 + + ```cpp + // Convert the Bitmap image passed in from the JAVA layer to Mat for OpenCV processing + BitmapToMat(env, srcBitmap, matImageSrc); + // Processing such as zooming the picture size. 
+ matImgPreprocessed = PreProcessImageData(matImageSrc); + + ImgDims inputDims; + inputDims.channel = matImgPreprocessed.channels(); + inputDims.width = matImgPreprocessed.cols; + inputDims.height = matImgPreprocessed.rows; + float *dataHWC = new float[inputDims.channel * inputDims.width * inputDims.height] + + // Copy the image data to be detected to the dataHWC array. + // The dataHWC[image_size] array here is the intermediate variable of the input MindSpore model tensor. + float *ptrTmp = reinterpret_cast(matImgPreprocessed.data); + for(int i = 0; i < inputDims.channel * inputDims.width * inputDims.height; i++){ + dataHWC[i] = ptrTmp[i]; + } + + // Assign dataHWC[image_size] to the input tensor variable. + auto msInputs = mSession->GetInputs(); + auto inTensor = msInputs.front(); + memcpy(inTensor->MutableData(), dataHWC, + inputDims.channel * inputDims.width * inputDims.height * sizeof(float)); + delete[] (dataHWC); + ``` + +3. 对输入Tensor按照模型进行推理,获取输出Tensor,并进行后处理。 + + - 图执行,端测推理。 + + ```cpp + // After the model and image tensor data is loaded, run inference. + auto status = mSession->RunGraph(); + ``` + + - 获取输出数据。 + ```cpp + // Get the mindspore inference results. + auto msOutputs = mSession->GetOutputMapByNode(); + std::string retStr = ProcessRunnetResult(msOutputs); + ``` + + - 输出数据的后续处理。 + ```cpp + std::string ProcessRunnetResult( + std::unordered_map> msOutputs){ + + // Get the branch of the model output. + // Use iterators to get map elements. + std::unordered_map>::iterator iter; + iter = msOutputs.begin(); + + // The mobilenetv2.ms model output just one branch. 
+ auto outputString = iter->first; + auto outputTensor = iter->second; + + float *temp_scores = static_cast(branch1_tensor[0]->MutableData()); + + float scores[RET_CATEGORY_SUM]; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + if (temp_scores[i] > 0.5){ + MS_PRINT("MindSpore scores[%d] : [%f]", i, temp_scores[i]); + } + scores[i] = temp_scores[i]; + } + + // Converted to text information that needs to be displayed in the APP. + std::string categoryScore = ""; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + categoryScore += g_labels_name_map[i]; + categoryScore += ":"; + std::string score_str = std::to_string(scores[i]); + categoryScore += score_str; + categoryScore += ";"; + } + return categoryScore; + } + ``` diff --git a/model_zoo/official/lite/app/.gitignore b/model_zoo/official/lite/app/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..42afabfd2abebf31384ca7797186a27a4b7dbee8 --- /dev/null +++ b/model_zoo/official/lite/app/.gitignore @@ -0,0 +1 @@ +/build \ No newline at end of file diff --git a/model_zoo/official/lite/app/CMakeLists.txt b/model_zoo/official/lite/app/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..55ff11cd86f2bfc35c7cf9296ef79a6858f3b1bd --- /dev/null +++ b/model_zoo/official/lite/app/CMakeLists.txt @@ -0,0 +1,94 @@ +# For more information about using CMake with Android Studio, read the +# documentation: https://d.android.com/studio/projects/add-native-code.html + +# Sets the minimum version of CMake required to build the native library. + +cmake_minimum_required(VERSION 3.4.1) + +set(CMAKE_VERBOSE_MAKEFILE on) +set(libs ${CMAKE_SOURCE_DIR}/libs) + + +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}) + + +# ============== Set MindSpore Dependencies. 
============= +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore/flatbuffers) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore/ir/dtype) +include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include/MindSpore/schema) + +add_library(mindspore-lite SHARED IMPORTED ) + +set_target_properties(mindspore-lite PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libmindspore-lite.so) +# --------------- MindSpore Lite set End. -------------------- + + + +# =============== Set OpenCV Dependencies =================== + +include_directories(${CMAKE_SOURCE_DIR}/opencv/sdk/native/jni/include/) + +add_library(lib-opencv SHARED IMPORTED ) + +set_target_properties(lib-opencv PROPERTIES IMPORTED_LOCATION + ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libopencv_java4.so) + +# --------------- OpenCV set End. --------------------------- + + +# Creates and names a library, sets it as either STATIC +# or SHARED, and provides the relative paths to its source code. +# You can define multiple libraries, and CMake builds them for you. +# Gradle automatically packages shared libraries with your APK. + +file(GLOB_RECURSE cpp_src "src/main/cpp/*.cpp" "src/main/cpp/*.h") + +add_library( # Sets the name of the library. + mlkit-label-MS + + # Sets the library as a shared library. + SHARED + + # Provides a relative path to your source file(s). + ${cpp_src}) + + +# Searches for a specified prebuilt library and stores the path as a +# variable. Because CMake includes system libraries in the search path by +# default, you only need to specify the name of the public NDK library +# you want to add. CMake verifies that the library exists before +# completing its build. + +find_library( # Sets the name of the path variable. + log-lib + + # Specifies the name of the NDK library that + # you want CMake to locate. 
+ log ) + + +find_library( jnigraphics-lib jnig·raphics ) + +# Specifies libraries CMake should link to your target library. You +# can link multiple libraries, such as libraries you define in this +# build script, prebuilt third-party libraries, or system libraries. +add_definitions(-DMNN_USE_LOGCAT) +target_link_libraries( # Specifies the target library. + mlkit-label-MS + + # --- opencv --- + lib-opencv + + # --- mindspore --- + mindspore-lite + + # --- other dependencies.--- + -ljnigraphics + android + + # Links the target library to the log library + ${log-lib} + ) \ No newline at end of file diff --git a/model_zoo/official/lite/app/build.gradle b/model_zoo/official/lite/app/build.gradle new file mode 100644 index 0000000000000000000000000000000000000000..897c99539b03021a01d98013edd6065e2b1c2a05 --- /dev/null +++ b/model_zoo/official/lite/app/build.gradle @@ -0,0 +1,84 @@ +apply plugin: 'com.android.application' + +android { + compileSdkVersion 30 + buildToolsVersion "30.0.1" + + defaultConfig { + applicationId "com.huawei.himindsporedemo" + minSdkVersion 21 + targetSdkVersion 30 + versionCode 1 + versionName "1.0" + + testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" + externalNativeBuild { + cmake { + arguments "-DANDROID_STL=c++_shared" + cppFlags "" + } + } + ndk { + abiFilters 'arm64-v8a' + } + } + aaptOptions { + noCompress '.so', 'ms' + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + + customDebugType { + debuggable true + } + } + externalNativeBuild { + cmake { + path file('CMakeLists.txt') + } + } + ndkVersion '21.3.6528147' + + sourceSets{ + main { + jniLibs.srcDirs = ['libs'] + } + } + packagingOptions{ + pickFirst 'lib/arm64-v8a/libopencv_java4.so' + pickFirst 'lib/arm64-v8a/libmindspore-lite.so' + pickFirst 'lib/arm64-v8a/libmlkit-label-MS.so' + } + +} + + + +// Before gradle build. +// To download some necessary libraries. 
+apply from:'download.gradle' +/*if (!file("libs/arm64-v8a/libmindspore-lite.so").exists() || + !file("libs/arm64-v8a/libopencv_java4.so").exists()){ + apply from:'download.gradle' +}*/ + + + +dependencies { + implementation fileTree(dir: "libs", include: ["*.jar"]) + implementation 'androidx.appcompat:appcompat:1.1.0' + implementation 'androidx.constraintlayout:constraintlayout:1.1.3' +// implementation project(path: ':sdk') + + testImplementation 'junit:junit:4.12' + androidTestImplementation 'androidx.test.ext:junit:1.1.1' + androidTestImplementation 'androidx.test.espresso:espresso-core:3.2.0' + + implementation 'com.google.android.material:material:1.0.0' + androidTestImplementation 'com.android.support.test:rules:1.0.2' + androidTestImplementation 'com.google.truth:truth:1.0.1' +} diff --git a/model_zoo/official/lite/app/download.gradle b/model_zoo/official/lite/app/download.gradle new file mode 100644 index 0000000000000000000000000000000000000000..139a25a5bf1d26451c0ab1e686cd21dcd3a8cf87 --- /dev/null +++ b/model_zoo/official/lite/app/download.gradle @@ -0,0 +1,73 @@ +/** + * To download necessary library from HuaWei server. + * Including mindspore-lite .so file, opencv .so file and model file. + * The libraries can be downloaded manually. 
+ */ + + +def targetModelFile = "src/main/assets/model/mobilenetv2.ms" +def openCVLibrary_arm64 = "libs/arm64-v8a/libopencv_java4.so" +def mindSporeLibrary_arm64 = "libs/arm64-v8a/libmindspore-lite.so" + +def modelDownloadUrl = "https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_openimage_lite/mobilenetv2.ms" +def opencvDownloadUrl = "https://download.mindspore.cn/model_zoo/official/lite/lib/opencv%204.4.0/libopencv_java4.so" +def mindsporeLiteDownloadUrl = "https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so" + + +task downloadModelFile(type: DownloadUrlTask) { + doFirst { + println "Downloading ${modelDownloadUrl}" + } + sourceUrl = "${modelDownloadUrl}" + target = file("${targetModelFile}") +} + + +task downloadOpenCVLibrary(type: DownloadUrlTask) { + doFirst { + println "Downloading ${opencvDownloadUrl}" + } + sourceUrl = "${opencvDownloadUrl}" + target = file("${openCVLibrary_arm64}") +} + +task downloadMindSporeLibrary(type: DownloadUrlTask) { + doFirst { + println "Downloading ${mindsporeLiteDownloadUrl}" + } + sourceUrl = "${mindsporeLiteDownloadUrl}" + target = file("${mindSporeLibrary_arm64}") +} + +/* + * Using preBuild to download mindspore library, opencv library and model file. + * Run before gradle build. 
+ */ +if (file("libs/arm64-v8a/libmindspore-lite.so").exists()){ + downloadMindSporeLibrary.enabled = false +} + +if (file("libs/arm64-v8a/libopencv_java4.so.so").exists()){ + downloadOpenCVLibrary.enabled = false +} +if (file("src/main/assets/model/mobilenetv2.ms").exists()){ + downloadModelFile.enabled = false +} + +preBuild.dependsOn downloadMindSporeLibrary +preBuild.dependsOn downloadOpenCVLibrary +preBuild.dependsOn downloadModelFile + + +class DownloadUrlTask extends DefaultTask { + @Input + String sourceUrl + + @OutputFile + File target + + @TaskAction + void download() { + ant.get(src: sourceUrl, dest: target) + } +} diff --git a/model_zoo/official/lite/app/proguard-rules.pro b/model_zoo/official/lite/app/proguard-rules.pro new file mode 100644 index 0000000000000000000000000000000000000000..481bb434814107eb79d7a30b676d344b0df2f8ce --- /dev/null +++ b/model_zoo/official/lite/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. 
+#-renamesourcefileattribute SourceFile \ No newline at end of file diff --git a/model_zoo/official/lite/app/src/androidTest/java/com/huawei/himindsporedemo/ExampleInstrumentedTest.java b/model_zoo/official/lite/app/src/androidTest/java/com/huawei/himindsporedemo/ExampleInstrumentedTest.java new file mode 100644 index 0000000000000000000000000000000000000000..56098d12b7bcf6cbd4e70a245ae17b675a01c365 --- /dev/null +++ b/model_zoo/official/lite/app/src/androidTest/java/com/huawei/himindsporedemo/ExampleInstrumentedTest.java @@ -0,0 +1,26 @@ +package com.huawei.himindsporedemo; + +import android.content.Context; + +import androidx.test.platform.app.InstrumentationRegistry; +import androidx.test.ext.junit.runners.AndroidJUnit4; + +import org.junit.Test; +import org.junit.runner.RunWith; + +import static org.junit.Assert.*; + +/** + * Instrumented test, which will execute on an Android device. + * + * @see Testing documentation + */ +@RunWith(AndroidJUnit4.class) +public class ExampleInstrumentedTest { + @Test + public void useAppContext() { + // Context of the app under test. 
+ Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext(); + assertEquals("com.huawei.himindsporedemo", appContext.getPackageName()); + } +} \ No newline at end of file diff --git a/model_zoo/official/lite/app/src/main/AndroidManifest.xml b/model_zoo/official/lite/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000000000000000000000000000000000000..8332a0a306b133642395b449cac2840aca348df5 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/AndroidManifest.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/model_zoo/official/lite/app/src/main/assets/model/mobilenetv2.ms b/model_zoo/official/lite/app/src/main/assets/model/mobilenetv2.ms new file mode 100644 index 0000000000000000000000000000000000000000..5fa58672c69f06df616e376787fc0ca3d36b0543 Binary files /dev/null and b/model_zoo/official/lite/app/src/main/assets/model/mobilenetv2.ms differ diff --git a/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.cpp b/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f10abff5ea1fa1dbeb29b26cc371326d944f5399 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.cpp @@ -0,0 +1,296 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019. All rights reserved. + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include "MindSporeNetnative.h" +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "MindSpore/MSNetWork.h" +#include "HMS/HMS_label_thres.h" + +using namespace cv; +using namespace mindspore; +using namespace mindspore::tensor; + +#define MS_PRINT(format, ...) 
__android_log_print(ANDROID_LOG_INFO, "MSJNI", format, ##__VA_ARGS__) + + +void BitmapToMat2(JNIEnv *env, jobject &bitmap, Mat &mat, jboolean needUnPremultiplyAlpha) { + AndroidBitmapInfo info; + void *pixels = nullptr; + Mat &dst = mat; + CV_Assert(AndroidBitmap_getInfo(env, bitmap, &info) >= 0); + CV_Assert(info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 || + info.format == ANDROID_BITMAP_FORMAT_RGB_565); + CV_Assert(AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0); + CV_Assert(pixels); + + dst.create(info.height, info.width, CV_8UC4); + if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888) { + Mat tmp(info.height, info.width, CV_8UC4, pixels); + if (needUnPremultiplyAlpha) { + cvtColor(tmp, dst, COLOR_RGBA2BGR); + } else { + + tmp.copyTo(dst); + } + } else { + Mat tmp(info.height, info.width, CV_8UC4, pixels); + cvtColor(tmp, dst, COLOR_BGR5652RGBA); + } + AndroidBitmap_unlockPixels(env, bitmap); + return; +} + +void BitmapToMat(JNIEnv *env, jobject &bitmap, Mat &mat) { + BitmapToMat2(env, bitmap, mat, true); +} + +/** + * Processing image with resize and normalize. + */ +cv::Mat PreProcessImageData(cv::Mat input) { + cv::Mat imgFloatTmp, imgResized256, imgResized224; + int resizeWidth = 256; + int resizeHeight = 256; + float normalizMin = 1.0; + float normalizMax = 255.0; + + cv::resize(input, imgFloatTmp, cv::Size(resizeWidth, resizeHeight)); + + + imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax); + + int offsetX = 16; + int offsetY = 16; + int cropWidth = 224; + int cropHeight = 224; + + // Standardization processing. + float meanR = 0.485; + float meanG = 0.456; + float meanB = 0.406; + float varR = 0.229; + float varG = 0.224; + float varB = 0.225; + + cv::Rect roi; + roi.x = offsetX; + roi.y = offsetY; + roi.width = cropWidth; + roi.height = cropHeight; + + // The final image size of the incoming model is 224*224. 
+ imgResized256(roi).copyTo(imgResized224); + + Scalar mean = Scalar(meanR, meanG, meanB); + Scalar var = Scalar(varR, varG, varB); + cv::Mat imgResized1; + cv::Mat imgResized2; + Mat imgMean(imgResized224.size(), CV_32FC3, + mean); // imgMean Each pixel channel is (0.485, 0.456, 0.406) + Mat imgVar(imgResized224.size(), CV_32FC3, + var); // imgVar Each pixel channel is (0.229, 0.224, 0.225) + imgResized1 = imgResized224 - imgMean; + imgResized2 = imgResized1 / imgVar; + return imgResized2; +} + +char *CreateLocalModelBuffer(JNIEnv *env, jobject modelBuffer) { + jbyte *modelAddr = static_cast(env->GetDirectBufferAddress(modelBuffer)); + int modelLen = static_cast(env->GetDirectBufferCapacity(modelBuffer)); + char *buffer(new char[modelLen]); + memcpy(buffer, modelAddr, modelLen); + return buffer; +} + +/** + * To process the result of mindspore inference. + * @param msOutputs + * @return + */ +std::string ProcessRunnetResult( + std::unordered_map> msOutputs) { + + // Get the branch of the model output. + // Use iterators to get map elements. + std::unordered_map>::iterator iter; + iter = msOutputs.begin(); + + // The mobilenetv2.ms model output just one branch. + auto outputString = iter->first; + auto outputTensor = iter->second; + + int tensorNum = outputTensor[0]->ElementsNum(); + MS_PRINT("Number of tensor elements:%d", tensorNum); + + // Get a pointer to the first score. + float *temp_scores = static_cast(outputTensor[0]->MutableData()); + + float scores[RET_CATEGORY_SUM]; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + if (temp_scores[i] > 0.5) { + MS_PRINT("MindSpore scores[%d] : [%f]", i, temp_scores[i]); + } + scores[i] = temp_scores[i]; + } + + // Score for each category. + // Converted to text information that needs to be displayed in the APP. 
+ std::string categoryScore = ""; + for (int i = 0; i < RET_CATEGORY_SUM; ++i) { + categoryScore += g_labels_name_map[i]; + categoryScore += ":"; + std::string score_str = std::to_string(scores[i]); + categoryScore += score_str; + categoryScore += ";"; + } + return categoryScore; +} + + +/** + * The Java layer reads the model into MappedByteBuffer or ByteBuffer to load the model. + */ +extern "C" +JNIEXPORT jlong JNICALL +Java_com_huawei_himindsporedemo_gallery_classify_TrackingMobile_loadModel(JNIEnv *env, jobject thiz, + jobject model_buffer, + jint num_thread) { + // TODO: implement loadModel() + if (nullptr == model_buffer) { + MS_PRINT("error, buffer is nullptr!"); + return (jlong) nullptr; + } + jlong bufferLen = env->GetDirectBufferCapacity(model_buffer); + if (0 == bufferLen) { + MS_PRINT("error, bufferLen is 0!"); + return (jlong) nullptr; + } + + char *modelBuffer = CreateLocalModelBuffer(env, model_buffer); + if (modelBuffer == nullptr) { + MS_PRINT("modelBuffer create failed!"); + return (jlong) nullptr; + } + + // To create a mindspore network inference environment. + void **labelEnv = new void *; + MSNetWork *labelNet = new MSNetWork; + *labelEnv = labelNet; + + lite::Context *context = new lite::Context; + context->thread_num_ = num_thread; + + labelNet->CreateSessionMS(modelBuffer, bufferLen, context); + delete (context); + + if (labelNet->session == nullptr) { + MS_PRINT("MindSpore create session failed!."); + return (jlong) nullptr; + } + + if (model_buffer != nullptr) { + env->DeleteLocalRef(model_buffer); + } + + return (jlong) labelEnv; +} + +/** + * After the inference environment is successfully created, + * sending a picture to the model and run inference. 
+ */ +extern "C" JNIEXPORT jstring JNICALL +Java_com_huawei_himindsporedemo_gallery_classify_TrackingMobile_runNet(JNIEnv *env, jclass type, + jlong netEnv, + jobject srcBitmap) { + + Mat matImageSrc; + BitmapToMat(env, srcBitmap, matImageSrc); + Mat matImgPreprocessed = PreProcessImageData(matImageSrc); + + + ImgDims inputDims; + inputDims.channel = matImgPreprocessed.channels(); + inputDims.width = matImgPreprocessed.cols; + inputDims.height = matImgPreprocessed.rows; + + // Get the mindsore inference environment which created in loadModel(). + void **labelEnv = reinterpret_cast(netEnv); + if (labelEnv == nullptr) { + MS_PRINT("MindSpore error, labelEnv is a nullptr."); + return NULL; + } + MSNetWork *labelNet = static_cast(*labelEnv); + + auto mSession = labelNet->session; + if (mSession == nullptr) { + MS_PRINT("MindSpore error, Session is a nullptr."); + return NULL; + } + MS_PRINT("MindSpore get session."); + + auto msInputs = mSession->GetInputs(); + if (msInputs.size() == 0) { + MS_PRINT("MindSpore error, msInputs.size() equals 0."); + return NULL; + } + auto inTensor = msInputs.front(); + + // dataHWC is the tensor format. + float *dataHWC = new float[inputDims.channel * inputDims.width * inputDims.height]; + float *ptrTmp = reinterpret_cast(matImgPreprocessed.data); + for (int i = 0; i < inputDims.channel * inputDims.width * inputDims.height; ++i) { + dataHWC[i] = ptrTmp[i]; + } + + // Copy dataHWC to the model input tensor. + memcpy(inTensor->MutableData(), dataHWC, + inputDims.channel * inputDims.width * inputDims.height * sizeof(float)); + // When using 'new' to allocate memory space, we need to use 'delete' to free space. + delete[] (dataHWC); + + // After the model and image tensor data is loaded, run inference. + auto status = mSession->RunGraph(); + + if (status != lite::RET_OK) { + MS_PRINT("MindSpore run net error."); + return NULL; + } + + /** + * Get the mindspore inference results. 
+ * Return the map of output node name and MindSpore Lite MSTensor. + */ + auto msOutputs = mSession->GetOutputMapByNode(); + + std::string resultStr = ProcessRunnetResult(msOutputs); + + const char *resultCharData = resultStr.c_str(); + return (env)->NewStringUTF(resultCharData); +} + +extern "C" JNIEXPORT jboolean JNICALL +Java_com_huawei_himindsporedemo_gallery_classify_TrackingMobile_unloadModel(JNIEnv *env, + jclass type, + jlong netEnv) { + MS_PRINT("MindSpore release net."); + void **labelEnv = reinterpret_cast(netEnv); + if (labelEnv == nullptr) { + MS_PRINT("MindSpore error, labelEnv is a nullptr."); + } + MSNetWork *labelNet = static_cast(*labelEnv); + + labelNet->ReleaseNets(); + + return (jboolean) true; +} diff --git a/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.h b/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.h new file mode 100644 index 0000000000000000000000000000000000000000..5a993269b2e5ddbd3477b188a69100aa124af8ac --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/MindSporeNetnative.h @@ -0,0 +1,8 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019. All rights reserved. + */ + +#ifndef MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H +#define MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H + +#endif //MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H diff --git a/model_zoo/official/lite/app/src/main/cpp/include/HMS/HMS_label_thres.h b/model_zoo/official/lite/app/src/main/cpp/include/HMS/HMS_label_thres.h new file mode 100644 index 0000000000000000000000000000000000000000..cf65de93a64fc28f062f12fdb6a86d73e54f891f --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/HMS/HMS_label_thres.h @@ -0,0 +1,616 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019. All rights reserved. 
/**
 * Label table for the MindSpore Lite image-classification demo (JNI layer).
 * Presumably indexed by the model's output class id — confirm against
 * MindSporeNetnative.cpp, which consumes this table.
 */
#ifndef MNN_JNI_HMS_HMS_LABEL_THRES_H
#define MNN_JNI_HMS_HMS_LABEL_THRES_H

#include <string>

// Total number of categories the bundled mobilenetv2.ms model can emit.
constexpr int RET_CATEGORY_SUM = 601;

// Human-readable category names, one per output class.
// NOTE(review): a few entries carry a trailing space
// ("Organ(MusicalInstrument) ", "Boy ", "Adhesivetape ") — they are shown to
// the user verbatim and are kept as-is; confirm with the UI before cleaning.
static std::string g_labels_name_map[RET_CATEGORY_SUM] = {
    "Tortoise", "Container", "Magpie", "Seaturtle", "Football",
    "Ambulance", "Ladder", "Toothbrush", "Syringe", "Sink",
    "Toy", "Organ(MusicalInstrument) ", "Cassettedeck", "Apple", "Humaneye",
    "Cosmetics", "Paddle", "Snowman", "Beer", "Chopsticks",
    "Humanbeard", "Bird", "Parkingmeter", "Trafficlight", "Croissant",
    "Cucumber", "Radish", "Towel", "Doll", "Skull",
    "Washingmachine", "Glove", "Tick", "Belt", "Sunglasses",
    "Banjo", "Cart", "Ball", "Backpack", "Bicycle",
    "Homeappliance", "Centipede", "Boat", "Surfboard", "Boot",
    "Headphones", "Hotdog", "Shorts", "Fastfood", "Bus",
    "Boy ", "Screwdriver", "Bicyclewheel", "Barge", "Laptop",
    "Miniskirt", "Drill(Tool)", "Dress", "Bear", "Waffle",
    "Pancake", "Brownbear", "Woodpecker", "Bluejay", "Pretzel",
    "Bagel", "Tower", "Teapot", "Person", "Bowandarrow",
    "Swimwear", "Beehive", "Brassiere", "Bee", "Bat(Animal)",
    "Starfish", "Popcorn", "Burrito", "Chainsaw", "Balloon",
    "Wrench", "Tent", "Vehicleregistrationplate", "Lantern", "Toaster",
    "Flashlight", "Billboard", "Tiara", "Limousine", "Necklace",
    "Carnivore", "Scissors", "Stairs", "Computerkeyboard", "Printer",
    "Trafficsign", "Chair", "Shirt", "Poster", "Cheese",
    "Sock", "Firehydrant", "Landvehicle", "Earrings", "Tie",
    "Watercraft", "Cabinetry", "Suitcase", "Muffin", "Bidet",
    "Snack", "Snowmobile", "Clock", "Medicalequipment", "Cattle",
    "Cello", "Jetski", "Camel", "Coat", "Suit",
    "Desk", "Cat", "Bronzesculpture", "Juice", "Gondola",
    "Beetle", "Cannon", "Computermouse", "Cookie", "Officebuilding",
    "Fountain", "Coin", "Calculator", "Cocktail", "Computermonitor",
    "Box", "Stapler", "Christmastree", "Cowboyhat", "Hikingequipment",
    "Studiocouch", "Drum", "Dessert", "Winerack", "Drink",
    "Zucchini", "Ladle", "Humanmouth", "DairyProduct", "Dice",
    "Oven", "Dinosaur", "Ratchet(Device)", "Couch", "Cricketball",
    "Wintermelon", "Spatula", "Whiteboard", "Pencilsharpener", "Door",
    "Hat", "Shower", "Eraser", "Fedora", "Guacamole",
    "Dagger", "Scarf", "Dolphin", "Sombrero", "Tincan",
    "Mug", "Tap", "Harborseal", "Stretcher", "Canopener",
    "Goggles", "Humanbody", "Rollerskates", "Coffeecup", "Cuttingboard",
    "Blender", "Plumbingfixture", "Stopsign", "Officesupplies", "Volleyball(Ball)",
    "Vase", "Slowcooker", "Wardrobe", "Coffee", "Whisk",
    "Papertowel", "Personalcare", "Food", "Sunhat", "Treehouse",
    "Flyingdisc", "Skirt", "Gasstove", "Saltandpeppershakers", "Mechanicalfan",
    "Facepowder", "Fax", "Fruit", "Frenchfries", "Nightstand",
    "Barrel", "Kite", "Tart", "Treadmill", "Fox",
    "Flag", "Frenchhorn", "Windowblind", "Humanfoot", "Golfcart",
    "Jacket", "Egg(Food)", "Streetlight", "Guitar", "Pillow",
    "Humanleg", "Isopod", "Grape", "Humanear", "Powerplugsandsockets",
    "Panda", "Giraffe", "Woman", "Doorhandle", "Rhinoceros",
    "Bathtub", "Goldfish", "Houseplant", "Goat", "Baseballbat",
    "Baseballglove", "Mixingbowl", "Marineinvertebrates", "Kitchenutensil", "Lightswitch",
    "House", "Horse", "Stationarybicycle", "Hammer", "Ceilingfan",
    "Sofabed", "Adhesivetape ", "Harp", "Sandal", "Bicyclehelmet",
    "Saucer", "Harpsichord", "Humanhair", "Heater", "Harmonica",
    "Hamster", "Curtain", "Bed", "Kettle", "Fireplace",
    "Scale", "Drinkingstraw", "Insect", "Hairdryer", "Kitchenware",
    "Indoorrower", "Invertebrate", "Foodprocessor", "Bookcase", "Refrigerator",
    "Wood-burningstove", "Punchingbag", "Commonfig", "Cocktailshaker", "Jaguar(Animal)",
    "Golfball", "Fashionaccessory", "Alarmclock", "Filingcabinet", "Artichoke",
    "Table", "Tableware", "Kangaroo", "Koala", "Knife",
    "Bottle", "Bottleopener", "Lynx", "Lavender(Plant)", "Lighthouse",
    "Dumbbell", "Humanhead", "Bowl", "Humidifier", "Porch",
    "Lizard", "Billiardtable", "Mammal", "Mouse", "Motorcycle",
    "Musicalinstrument", "Swimcap", "Fryingpan", "Snowplow", "Bathroomcabinet",
    "Missile", "Bust", "Man", "Waffleiron", "Milk",
    "Ringbinder", "Plate", "Mobilephone", "Bakedgoods", "Mushroom",
    "Crutch", "Pitcher(Container)", "Mirror", "Personalflotationdevice", "Tabletennisracket",
    "Pencilcase", "Musicalkeyboard", "Scoreboard", "Briefcase", "Kitchenknife",
    "Nail(Construction)", "Tennisball", "Plasticbag", "Oboe", "Chestofdrawers",
    "Ostrich", "Piano", "Girl", "Plant", "Potato",
    "Hairspray", "Sportsequipment", "Pasta", "Penguin", "Pumpkin",
    "Pear", "Infantbed", "Polarbear", "Mixer", "Cupboard",
    "Jacuzzi", "Pizza", "Digitalclock", "Pig", "Reptile",
    "Rifle", "Lipstick", "Skateboard", "Raven", "Highheels",
    "Redpanda", "Rose", "Rabbit", "Sculpture", "Saxophone",
    "Shotgun", "Seafood", "Submarinesandwich", "Snowboard", "Sword",
    "Pictureframe", "Sushi", "Loveseat", "Ski", "Squirrel",
    "Tripod", "Stethoscope", "Submarine", "Scorpion", "Segway",
    "Trainingbench", "Snake", "Coffeetable", "Skyscraper", "Sheep",
    "Television", "Trombone", "Tea", "Tank", "Taco",
    "Telephone", "Torch", "Tiger", "Strawberry", "Trumpet",
    "Tree", "Tomato", "Train", "Tool", "Picnicbasket",
    "Cookingspray", "Trousers", "Bowlingequipment", "Footballhelmet", "Truck",
    "Measuringcup", "Coffeemaker", "Violin", "Vehicle", "Handbag",
    "Papercutter", "Wine", "Weapon", "Wheel", "Worm",
    "Wok", "Whale", "Zebra", "Autopart", "Jug",
    "Pizzacutter", "Cream", "Monkey", "Lion", "Bread",
    "Platter", "Chicken", "Eagle", "Helicopter", "Owl",
    "Duck", "Turtle", "Hippopotamus", "Crocodile", "Toilet",
    "Toiletpaper", "Squid", "Clothing", "Footwear", "Lemon",
    "Spider", "Deer", "Frog", "Banana", "Rocket",
    "Wineglass", "Countertop", "Tabletcomputer", "Wastecontainer", "Swimmingpool",
    "Dog", "Book", "Elephant", "Shark", "Candle",
    "Leopard", "Axe", "Handdryer", "Soapdispenser", "Porcupine",
    "Flower", "Canary", "Cheetah", "Palmtree", "Hamburger",
    "Maple", "Building", "Fish", "Lobster", "GardenAsparagus",
    "Furniture", "Hedgehog", "Airplane", "Spoon", "Otter",
    "Bull", "Oyster", "Horizontalbar", "Conveniencestore", "Bomb",
    "Bench", "Icecream", "Caterpillar", "Butterfly", "Parachute",
    "Orange", "Antelope", "Beaker", "Mothsandbutterflies", "Window",
    "Closet", "Castle", "Jellyfish", "Goose", "Mule",
    "Swan", "Peach", "Coconut", "Seatbelt", "Raccoon",
    "Chisel", "Fork", "Lamp", "Camera", "Squash(Plant)",
    "Racket", "Humanface", "Humanarm", "Vegetable", "Diaper",
    "Unicycle", "Falcon", "Chime", "Snail", "Shellfish",
    "Cabbage", "Carrot", "Mango", "Jeans", "Flowerpot",
    "Pineapple", "Drawer", "Stool", "Envelope", "Cake",
    "Dragonfly", "Commonsunflower", "Microwaveoven", "Honeycomb", "Marinemammal",
    "Sealion", "Ladybug", "Shelf", "Watch", "Candy",
    "Salad", "Parrot", "Handgun", "Sparrow", "Van",
    "Grinder", "Spicerack", "Lightbulb", "Cordedphone", "Sportsuniform",
    "Tennisracket", "Wallclock", "Servingtray", "Kitchen&diningroomtable", "Dogbed",
    "Cakestand", "Catfurniture", "Bathroomaccessory", "Facialtissueholder", "Pressurecooker",
    "Kitchenappliance", "Tire", "Ruler", "Luggageandbags", "Microphone",
    "Broccoli", "Umbrella", "Pastry", "Grapefruit", "Band-aid",
    "Animal", "Bellpepper", "Turkey", "Lily", "Pomegranate",
    "Doughnut", "Glasses", "Humannose", "Pen", "Ant",
    "Car", "Aircraft", "Humanhand", "Skunk", "Teddybear",
    "Watermelon", "Cantaloupe", "Dishwasher", "Flute", "Balancebeam",
    "Sandwich", "Shrimp", "Sewingmachine", "Binoculars", "Raysandskates",
    "Ipod", "Accordion", "Willow", "Crab", "Crown",
    "Seahorse", "Perfume", "Alpaca", "Taxi", "Canoe",
    "Remotecontrol", "Wheelchair", "Rugbyball", "Armadillo", "Maracas",
    "Helmet",
};

#endif  // MNN_JNI_HMS_HMS_LABEL_THRES_H
a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.cpp b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d17b565aa11b6d5d942403cf6409d9e1defb777 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.cpp @@ -0,0 +1,41 @@ +#include "MSNetWork.h" +#include +#include +#include "errorcode.h" + +#define MS_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MSJNI", format, ##__VA_ARGS__) + +MSNetWork::MSNetWork(void) : session(nullptr) {} +MSNetWork::~MSNetWork(void) {} + + +void MSNetWork::CreateSessionMS(char* modelBuffer, size_t bufferLen, mindspore::lite::Context* ctx) +{ + session = mindspore::session::LiteSession::CreateSession(ctx); + if (session == nullptr){ + MS_PRINT("Create Session failed."); + return; + } + + // Compile model. + auto model = mindspore::lite::Model::Import(modelBuffer, bufferLen); + if (model == nullptr){ + MS_PRINT("Import model failed."); + return; + } + + int ret = session->CompileGraph(model); + if (ret != mindspore::lite::RET_OK){ + MS_PRINT("CompileGraph failed."); + return; + } + +} + +int MSNetWork::ReleaseNets(void) +{ + delete session; +// delete model; + return 0; +} + diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.h new file mode 100644 index 0000000000000000000000000000000000000000..b320c6d25879871bb7f147277e38e20edf356df2 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/MSNetWork.h @@ -0,0 +1,50 @@ +// * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019. All rights reserved. 
+ +#ifndef MSNETWORK_H +#define MSNETWORK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace mindspore; + +struct ImgDims { + int channel = 0; + int width = 0; + int height = 0; +}; + +/*struct SessIterm { + std::shared_ptr sess = nullptr; +};*/ + + + +class MSNetWork { +public: + MSNetWork(); + ~MSNetWork(); + + void CreateSessionMS(char* modelBuffer, size_t bufferLen, mindspore::lite::Context* ctx); + int ReleaseNets(void); + mindspore::session::LiteSession *session; + mindspore::lite::Model *model; + +private: + //std::map sess; +}; + +#endif diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/context.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/context.h new file mode 100644 index 0000000000000000000000000000000000000000..35daea0371b946a4db6e1d668d446ae2f64b6d16 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/context.h @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_ +#define MINDSPORE_LITE_INCLUDE_CONTEXT_H_ + +#include +#include +#include "ms_tensor.h" +#include "thread_pool_config.h" + +namespace mindspore { + namespace lite { + /// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. 
+ /// + /// \note List public class and interface for reference. + class Allocator; + + /// \brief DeviceType defined for holding user's preferred backend. + typedef enum { + DT_CPU, /**< CPU device type */ + DT_GPU, /**< GPU device type */ + DT_NPU /**< NPU device type, not supported yet */ + } DeviceType; + + /// \brief DeviceContext defined for holding DeviceType. + typedef struct { + DeviceType type; /**< device type */ + } DeviceContext; + + /// \brief Context defined for holding environment variables during runtime. + class MS_API Context { + public: + /// \brief Constructor of MindSpore Lite Context using default value for parameters. + /// + /// \return Instance of MindSpore Lite Context. + Context(); + + /// \brief Constructor of MindSpore Lite Context using input value for parameters. + /// + /// \param[in] thread_num Define the work thread number during the runtime. + /// \param[in] allocator Define the allocator for malloc. + /// \param[in] device_ctx Define device information during the runtime. + Context(int thread_num, std::shared_ptr allocator, DeviceContext device_ctx); + + /// \brief Destructor of MindSpore Lite Context. 
+ virtual ~Context(); + + public: + bool float16_priority = false; /**< allow priority select float16 kernel */ + DeviceContext device_ctx_{DT_CPU}; + int thread_num_ = 2; /**< thread number config for thread pool */ + std::shared_ptr allocator = nullptr; + CpuBindMode cpu_bind_mode_ = MID_CPU; + }; + } +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/errorcode.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/errorcode.h new file mode 100644 index 0000000000000000000000000000000000000000..b04c6dda3e1e853dda706fee84652d963bcb53e7 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/errorcode.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ +#define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ + +namespace mindspore { +namespace lite { +/// \brief STATUS defined for holding error code in MindSpore Lite. +using STATUS = int; + +/* Success */ +constexpr int RET_OK = 0; /**< No error occurs. */ + +/* Common error code, range: [-1, -100]*/ +constexpr int RET_ERROR = -1; /**< Common error code. */ +constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/ +constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/ +constexpr int RET_NO_CHANGE = -4; /**< No change. 
*/ +constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. */ +constexpr int RET_MEMORY_FAILED = -6; /**< Fail to create memory. */ + +/* Executor error code, range: [-101,-200] */ +constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */ +constexpr int RET_INPUT_TENSOR_ERROR = -102; /**< Failed to check input tensor. */ +constexpr int RET_REENTRANT_ERROR = -103; /**< Exist executor running. */ + +/* Graph error code, range: [-201,-300] */ +constexpr int RET_GRAPH_FILE_ERR = -201; /**< Failed to verify graph file. */ + +/* Node error code, range: [-301,-400] */ +constexpr int RET_NOT_FIND_OP = -301; /**< Failed to find operator. */ +constexpr int RET_INVALID_OP_NAME = -302; /**< Invalid operator name. */ +constexpr int RET_INVALID_OP_ATTR = -303; /**< Invalid operator attr. */ +constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execution operator. */ + +/* Tensor error code, range: [-401,-500] */ +constexpr int RET_FORMAT_ERR = -401; /**< Failed to checking tensor format. */ + +/* InferShape error code, range: [-501,-600] */ +constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */ +constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */ +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/flatbuffers.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/flatbuffers.h new file mode 100644 index 0000000000000000000000000000000000000000..f1a13f10052edf319606f1ca3526898d032a5296 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/flatbuffers.h @@ -0,0 +1,2613 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_H_ +#define FLATBUFFERS_H_ + +#include "MindSpore/flatbuffers/include/base.h" + +#if defined(FLATBUFFERS_NAN_DEFAULTS) +#include +#endif + +namespace flatbuffers { +// Generic 'operator==' with conditional specialisations. +template inline bool IsTheSameAs(T e, T def) { return e == def; } + +#if defined(FLATBUFFERS_NAN_DEFAULTS) && \ + (!defined(_MSC_VER) || _MSC_VER >= 1800) +// Like `operator==(e, def)` with weak NaN if T=(float|double). +template<> inline bool IsTheSameAs(float e, float def) { + return (e == def) || (std::isnan(def) && std::isnan(e)); +} +template<> inline bool IsTheSameAs(double e, double def) { + return (e == def) || (std::isnan(def) && std::isnan(e)); +} +#endif + +// Wrapper for uoffset_t to allow safe template specialization. +// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset). +template struct Offset { + uoffset_t o; + Offset() : o(0) {} + Offset(uoffset_t _o) : o(_o) {} + Offset Union() const { return Offset(o); } + bool IsNull() const { return !o; } +}; + +inline void EndianCheck() { + int endiantest = 1; + // If this fails, see FLATBUFFERS_LITTLEENDIAN above. 
+ FLATBUFFERS_ASSERT(*reinterpret_cast(&endiantest) == + FLATBUFFERS_LITTLEENDIAN); + (void)endiantest; +} + +template FLATBUFFERS_CONSTEXPR size_t AlignOf() { + // clang-format off + #ifdef _MSC_VER + return __alignof(T); + #else + #ifndef alignof + return __alignof__(T); + #else + return alignof(T); + #endif + #endif + // clang-format on +} + +// When we read serialized data from memory, in the case of most scalars, +// we want to just read T, but in the case of Offset, we want to actually +// perform the indirection and return a pointer. +// The template specialization below does just that. +// It is wrapped in a struct since function templates can't overload on the +// return type like this. +// The typedef is for the convenience of callers of this function +// (avoiding the need for a trailing return decltype) +template struct IndirectHelper { + typedef T return_type; + typedef T mutable_return_type; + static const size_t element_stride = sizeof(T); + static return_type Read(const uint8_t *p, uoffset_t i) { + return EndianScalar((reinterpret_cast(p))[i]); + } +}; +template struct IndirectHelper> { + typedef const T *return_type; + typedef T *mutable_return_type; + static const size_t element_stride = sizeof(uoffset_t); + static return_type Read(const uint8_t *p, uoffset_t i) { + p += i * sizeof(uoffset_t); + return reinterpret_cast(p + ReadScalar(p)); + } +}; +template struct IndirectHelper { + typedef const T *return_type; + typedef T *mutable_return_type; + static const size_t element_stride = sizeof(T); + static return_type Read(const uint8_t *p, uoffset_t i) { + return reinterpret_cast(p + i * sizeof(T)); + } +}; + +// An STL compatible iterator implementation for Vector below, effectively +// calling Get() for every element. 
+template struct VectorIterator { + typedef std::random_access_iterator_tag iterator_category; + typedef IT value_type; + typedef ptrdiff_t difference_type; + typedef IT *pointer; + typedef IT &reference; + + VectorIterator(const uint8_t *data, uoffset_t i) + : data_(data + IndirectHelper::element_stride * i) {} + VectorIterator(const VectorIterator &other) : data_(other.data_) {} + VectorIterator() : data_(nullptr) {} + + VectorIterator &operator=(const VectorIterator &other) { + data_ = other.data_; + return *this; + } + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + VectorIterator &operator=(VectorIterator &&other) { + data_ = other.data_; + return *this; + } + #endif // !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + + bool operator==(const VectorIterator &other) const { + return data_ == other.data_; + } + + bool operator<(const VectorIterator &other) const { + return data_ < other.data_; + } + + bool operator!=(const VectorIterator &other) const { + return data_ != other.data_; + } + + difference_type operator-(const VectorIterator &other) const { + return (data_ - other.data_) / IndirectHelper::element_stride; + } + + IT operator*() const { return IndirectHelper::Read(data_, 0); } + + IT operator->() const { return IndirectHelper::Read(data_, 0); } + + VectorIterator &operator++() { + data_ += IndirectHelper::element_stride; + return *this; + } + + VectorIterator operator++(int) { + VectorIterator temp(data_, 0); + data_ += IndirectHelper::element_stride; + return temp; + } + + VectorIterator operator+(const uoffset_t &offset) const { + return VectorIterator(data_ + offset * IndirectHelper::element_stride, + 0); + } + + VectorIterator &operator+=(const uoffset_t &offset) { + data_ += offset * IndirectHelper::element_stride; + return *this; + } + + VectorIterator &operator--() { + data_ -= IndirectHelper::element_stride; + return *this; + } + + VectorIterator operator--(int) { + VectorIterator temp(data_, 0); + data_ -= 
IndirectHelper::element_stride; + return temp; + } + + VectorIterator operator-(const uoffset_t &offset) const { + return VectorIterator(data_ - offset * IndirectHelper::element_stride, + 0); + } + + VectorIterator &operator-=(const uoffset_t &offset) { + data_ -= offset * IndirectHelper::element_stride; + return *this; + } + + private: + const uint8_t *data_; +}; + +template struct VectorReverseIterator : + public std::reverse_iterator { + + explicit VectorReverseIterator(Iterator iter) : iter_(iter) {} + + typename Iterator::value_type operator*() const { return *(iter_ - 1); } + + typename Iterator::value_type operator->() const { return *(iter_ - 1); } + + private: + Iterator iter_; +}; + +struct String; + +// This is used as a helper type for accessing vectors. +// Vector::data() assumes the vector elements start after the length field. +template class Vector { + public: + typedef VectorIterator::mutable_return_type> + iterator; + typedef VectorIterator::return_type> + const_iterator; + typedef VectorReverseIterator reverse_iterator; + typedef VectorReverseIterator const_reverse_iterator; + + uoffset_t size() const { return EndianScalar(length_); } + + // Deprecated: use size(). Here for backwards compatibility. + FLATBUFFERS_ATTRIBUTE(deprecated("use size() instead")) + uoffset_t Length() const { return size(); } + + typedef typename IndirectHelper::return_type return_type; + typedef typename IndirectHelper::mutable_return_type mutable_return_type; + + return_type Get(uoffset_t i) const { + FLATBUFFERS_ASSERT(i < size()); + return IndirectHelper::Read(Data(), i); + } + + return_type operator[](uoffset_t i) const { return Get(i); } + + // If this is a Vector of enums, T will be its storage type, not the enum + // type. This function makes it convenient to retrieve value with enum + // type E. + template E GetEnum(uoffset_t i) const { + return static_cast(Get(i)); + } + + // If this a vector of unions, this does the cast for you. 
There's no check + // to make sure this is the right type! + template const U *GetAs(uoffset_t i) const { + return reinterpret_cast(Get(i)); + } + + // If this a vector of unions, this does the cast for you. There's no check + // to make sure this is actually a string! + const String *GetAsString(uoffset_t i) const { + return reinterpret_cast(Get(i)); + } + + const void *GetStructFromOffset(size_t o) const { + return reinterpret_cast(Data() + o); + } + + iterator begin() { return iterator(Data(), 0); } + const_iterator begin() const { return const_iterator(Data(), 0); } + + iterator end() { return iterator(Data(), size()); } + const_iterator end() const { return const_iterator(Data(), size()); } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + + reverse_iterator rend() { return reverse_iterator(end()); } + const_reverse_iterator rend() const { return const_reverse_iterator(end()); } + + const_iterator cbegin() const { return begin(); } + + const_iterator cend() const { return end(); } + + const_reverse_iterator crbegin() const { return rbegin(); } + + const_reverse_iterator crend() const { return rend(); } + + // Change elements if you have a non-const pointer to this object. + // Scalars only. See reflection.h, and the documentation. + void Mutate(uoffset_t i, const T &val) { + FLATBUFFERS_ASSERT(i < size()); + WriteScalar(data() + i, val); + } + + // Change an element of a vector of tables (or strings). + // "val" points to the new table/string, as you can obtain from + // e.g. reflection::AddFlatBuffer(). + void MutateOffset(uoffset_t i, const uint8_t *val) { + FLATBUFFERS_ASSERT(i < size()); + static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types"); + WriteScalar(data() + i, + static_cast(val - (Data() + i * sizeof(uoffset_t)))); + } + + // Get a mutable pointer to tables/strings inside this vector. 
+ mutable_return_type GetMutableObject(uoffset_t i) const { + FLATBUFFERS_ASSERT(i < size()); + return const_cast(IndirectHelper::Read(Data(), i)); + } + + // The raw data in little endian format. Use with care. + const uint8_t *Data() const { + return reinterpret_cast(&length_ + 1); + } + + uint8_t *Data() { return reinterpret_cast(&length_ + 1); } + + // Similarly, but typed, much like std::vector::data + const T *data() const { return reinterpret_cast(Data()); } + T *data() { return reinterpret_cast(Data()); } + + template return_type LookupByKey(K key) const { + void *search_result = std::bsearch( + &key, Data(), size(), IndirectHelper::element_stride, KeyCompare); + + if (!search_result) { + return nullptr; // Key not found. + } + + const uint8_t *element = reinterpret_cast(search_result); + + return IndirectHelper::Read(element, 0); + } + + protected: + // This class is only used to access pre-existing data. Don't ever + // try to construct these manually. + Vector(); + + uoffset_t length_; + + private: + // This class is a pointer. Copying will therefore create an invalid object. + // Private and unimplemented copy constructor. + Vector(const Vector &); + + template static int KeyCompare(const void *ap, const void *bp) { + const K *key = reinterpret_cast(ap); + const uint8_t *data = reinterpret_cast(bp); + auto table = IndirectHelper::Read(data, 0); + + // std::bsearch compares with the operands transposed, so we negate the + // result here. + return -table->KeyCompareWithValue(*key); + } +}; + +// Represent a vector much like the template above, but in this case we +// don't know what the element types are (used with reflection.h). 
+class VectorOfAny { + public: + uoffset_t size() const { return EndianScalar(length_); } + + const uint8_t *Data() const { + return reinterpret_cast(&length_ + 1); + } + uint8_t *Data() { return reinterpret_cast(&length_ + 1); } + + protected: + VectorOfAny(); + + uoffset_t length_; + + private: + VectorOfAny(const VectorOfAny &); +}; + +#ifndef FLATBUFFERS_CPP98_STL +template +Vector> *VectorCast(Vector> *ptr) { + static_assert(std::is_base_of::value, "Unrelated types"); + return reinterpret_cast> *>(ptr); +} + +template +const Vector> *VectorCast(const Vector> *ptr) { + static_assert(std::is_base_of::value, "Unrelated types"); + return reinterpret_cast> *>(ptr); +} +#endif + +// Convenient helper function to get the length of any vector, regardless +// of whether it is null or not (the field is not set). +template static inline size_t VectorLength(const Vector *v) { + return v ? v->size() : 0; +} + +// Lexicographically compare two strings (possibly containing nulls), and +// return true if the first is less than the second. +static inline bool StringLessThan(const char *a_data, uoffset_t a_size, + const char *b_data, uoffset_t b_size) { + const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size)); + return cmp == 0 ? a_size < b_size : cmp < 0; +} + +struct String : public Vector { + const char *c_str() const { return reinterpret_cast(Data()); } + std::string str() const { return std::string(c_str(), size()); } + + // clang-format off + #ifdef FLATBUFFERS_HAS_STRING_VIEW + MindSpore.flatbuffers::string_view string_view() const { + return MindSpore.flatbuffers::string_view(c_str(), size()); + } + #endif // FLATBUFFERS_HAS_STRING_VIEW + // clang-format on + + bool operator<(const String &o) const { + return StringLessThan(this->data(), this->size(), o.data(), o.size()); + } +}; + +// Convenience function to get std::string from a String returning an empty +// string on null pointer. +static inline std::string GetString(const String * str) { + return str ? 
str->str() : ""; +} + +// Convenience function to get char* from a String returning an empty string on +// null pointer. +static inline const char * GetCstring(const String * str) { + return str ? str->c_str() : ""; +} + +// Allocator interface. This is MindSpore.flatbuffers-specific and meant only for +// `vector_downward` usage. +class Allocator { + public: + virtual ~Allocator() {} + + // Allocate `size` bytes of memory. + virtual uint8_t *allocate(size_t size) = 0; + + // Deallocate `size` bytes of memory at `p` allocated by this allocator. + virtual void deallocate(uint8_t *p, size_t size) = 0; + + // Reallocate `new_size` bytes of memory, replacing the old region of size + // `old_size` at `p`. In contrast to a normal realloc, this grows downwards, + // and is intended specifcally for `vector_downward` use. + // `in_use_back` and `in_use_front` indicate how much of `old_size` is + // actually in use at each end, and needs to be copied. + virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, + size_t new_size, size_t in_use_back, + size_t in_use_front) { + FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows + uint8_t *new_p = allocate(new_size); + memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, + in_use_front); + deallocate(old_p, old_size); + return new_p; + } + + protected: + // Called by `reallocate_downward` to copy memory from `old_p` of `old_size` + // to `new_p` of `new_size`. Only memory of size `in_use_front` and + // `in_use_back` will be copied from the front and back of the old memory + // allocation. 
+ void memcpy_downward(uint8_t *old_p, size_t old_size, + uint8_t *new_p, size_t new_size, + size_t in_use_back, size_t in_use_front) { + memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back, + in_use_back); + memcpy(new_p, old_p, in_use_front); + } +}; + +// DefaultAllocator uses new/delete to allocate memory regions +class DefaultAllocator : public Allocator { + public: + uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE { + return new uint8_t[size]; + } + + void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { + delete[] p; + } + + static void dealloc(void *p, size_t) { + delete[] static_cast(p); + } +}; + +// These functions allow for a null allocator to mean use the default allocator, +// as used by DetachedBuffer and vector_downward below. +// This is to avoid having a statically or dynamically allocated default +// allocator, or having to move it between the classes that may own it. +inline uint8_t *Allocate(Allocator *allocator, size_t size) { + return allocator ? allocator->allocate(size) + : DefaultAllocator().allocate(size); +} + +inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) { + if (allocator) allocator->deallocate(p, size); + else DefaultAllocator().deallocate(p, size); +} + +inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p, + size_t old_size, size_t new_size, + size_t in_use_back, size_t in_use_front) { + return allocator + ? allocator->reallocate_downward(old_p, old_size, new_size, + in_use_back, in_use_front) + : DefaultAllocator().reallocate_downward(old_p, old_size, new_size, + in_use_back, in_use_front); +} + +// DetachedBuffer is a finished flatbuffer memory region, detached from its +// builder. The original memory region and allocator are also stored so that +// the DetachedBuffer can manage the memory lifetime. 
+class DetachedBuffer { + public: + DetachedBuffer() + : allocator_(nullptr), + own_allocator_(false), + buf_(nullptr), + reserved_(0), + cur_(nullptr), + size_(0) {} + + DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf, + size_t reserved, uint8_t *cur, size_t sz) + : allocator_(allocator), + own_allocator_(own_allocator), + buf_(buf), + reserved_(reserved), + cur_(cur), + size_(sz) {} + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + DetachedBuffer(DetachedBuffer &&other) + : allocator_(other.allocator_), + own_allocator_(other.own_allocator_), + buf_(other.buf_), + reserved_(other.reserved_), + cur_(other.cur_), + size_(other.size_) { + other.reset(); + } + // clang-format off + #endif // !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + DetachedBuffer &operator=(DetachedBuffer &&other) { + destroy(); + + allocator_ = other.allocator_; + own_allocator_ = other.own_allocator_; + buf_ = other.buf_; + reserved_ = other.reserved_; + cur_ = other.cur_; + size_ = other.size_; + + other.reset(); + + return *this; + } + // clang-format off + #endif // !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + + ~DetachedBuffer() { destroy(); } + + const uint8_t *data() const { return cur_; } + + uint8_t *data() { return cur_; } + + size_t size() const { return size_; } + + // clang-format off + #if 0 // disabled for now due to the ordering of classes in this header + template + bool Verify() const { + Verifier verifier(data(), size()); + return verifier.Verify(nullptr); + } + + template + const T* GetRoot() const { + return MindSpore.flatbuffers::GetRoot(data()); + } + + template + T* GetRoot() { + return MindSpore.flatbuffers::GetRoot(data()); + } + #endif + // clang-format on + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + // These may change access mode, leave these at end of public section + 
FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other)) + FLATBUFFERS_DELETE_FUNC( + DetachedBuffer &operator=(const DetachedBuffer &other)) + // clang-format off + #endif // !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + +protected: + Allocator *allocator_; + bool own_allocator_; + uint8_t *buf_; + size_t reserved_; + uint8_t *cur_; + size_t size_; + + inline void destroy() { + if (buf_) Deallocate(allocator_, buf_, reserved_); + if (own_allocator_ && allocator_) { delete allocator_; } + reset(); + } + + inline void reset() { + allocator_ = nullptr; + own_allocator_ = false; + buf_ = nullptr; + reserved_ = 0; + cur_ = nullptr; + size_ = 0; + } +}; + +// This is a minimal replication of std::vector functionality, +// except growing from higher to lower addresses. i.e push_back() inserts data +// in the lowest address in the vector. +// Since this vector leaves the lower part unused, we support a "scratch-pad" +// that can be stored there for temporary data, to share the allocated space. +// Essentially, this supports 2 std::vectors in a single buffer. 
+class vector_downward { + public: + explicit vector_downward(size_t initial_size, + Allocator *allocator, + bool own_allocator, + size_t buffer_minalign) + : allocator_(allocator), + own_allocator_(own_allocator), + initial_size_(initial_size), + buffer_minalign_(buffer_minalign), + reserved_(0), + buf_(nullptr), + cur_(nullptr), + scratch_(nullptr) {} + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + vector_downward(vector_downward &&other) + #else + vector_downward(vector_downward &other) + #endif // defined(FLATBUFFERS_CPP98_STL) + // clang-format on + : allocator_(other.allocator_), + own_allocator_(other.own_allocator_), + initial_size_(other.initial_size_), + buffer_minalign_(other.buffer_minalign_), + reserved_(other.reserved_), + buf_(other.buf_), + cur_(other.cur_), + scratch_(other.scratch_) { + // No change in other.allocator_ + // No change in other.initial_size_ + // No change in other.buffer_minalign_ + other.own_allocator_ = false; + other.reserved_ = 0; + other.buf_ = nullptr; + other.cur_ = nullptr; + other.scratch_ = nullptr; + } + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + vector_downward &operator=(vector_downward &&other) { + // Move construct a temporary and swap idiom + vector_downward temp(std::move(other)); + swap(temp); + return *this; + } + // clang-format off + #endif // defined(FLATBUFFERS_CPP98_STL) + // clang-format on + + ~vector_downward() { + clear_buffer(); + clear_allocator(); + } + + void reset() { + clear_buffer(); + clear(); + } + + void clear() { + if (buf_) { + cur_ = buf_ + reserved_; + } else { + reserved_ = 0; + cur_ = nullptr; + } + clear_scratch(); + } + + void clear_scratch() { + scratch_ = buf_; + } + + void clear_allocator() { + if (own_allocator_ && allocator_) { delete allocator_; } + allocator_ = nullptr; + own_allocator_ = false; + } + + void clear_buffer() { + if (buf_) Deallocate(allocator_, buf_, reserved_); + buf_ = nullptr; + } + + // Relinquish the 
pointer to the caller. + uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) { + auto *buf = buf_; + allocated_bytes = reserved_; + offset = static_cast(cur_ - buf_); + + // release_raw only relinquishes the buffer ownership. + // Does not deallocate or reset the allocator. Destructor will do that. + buf_ = nullptr; + clear(); + return buf; + } + + // Relinquish the pointer to the caller. + DetachedBuffer release() { + // allocator ownership (if any) is transferred to DetachedBuffer. + DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_, + size()); + if (own_allocator_) { + allocator_ = nullptr; + own_allocator_ = false; + } + buf_ = nullptr; + clear(); + return fb; + } + + size_t ensure_space(size_t len) { + FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_); + if (len > static_cast(cur_ - scratch_)) { reallocate(len); } + // Beyond this, signed offsets may not have enough range: + // (FlatBuffers > 2GB not supported). + FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE); + return len; + } + + inline uint8_t *make_space(size_t len) { + size_t space = ensure_space(len); + cur_ -= space; + return cur_; + } + + // Returns nullptr if using the DefaultAllocator. + Allocator *get_custom_allocator() { return allocator_; } + + uoffset_t size() const { + return static_cast(reserved_ - (cur_ - buf_)); + } + + uoffset_t scratch_size() const { + return static_cast(scratch_ - buf_); + } + + size_t capacity() const { return reserved_; } + + uint8_t *data() const { + FLATBUFFERS_ASSERT(cur_); + return cur_; + } + + uint8_t *scratch_data() const { + FLATBUFFERS_ASSERT(buf_); + return buf_; + } + + uint8_t *scratch_end() const { + FLATBUFFERS_ASSERT(scratch_); + return scratch_; + } + + uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; } + + void push(const uint8_t *bytes, size_t num) { + memcpy(make_space(num), bytes, num); + } + + // Specialized version of push() that avoids memcpy call for small data. 
+ template void push_small(const T &little_endian_t) { + make_space(sizeof(T)); + *reinterpret_cast(cur_) = little_endian_t; + } + + template void scratch_push_small(const T &t) { + ensure_space(sizeof(T)); + *reinterpret_cast(scratch_) = t; + scratch_ += sizeof(T); + } + + // fill() is most frequently called with small byte counts (<= 4), + // which is why we're using loops rather than calling memset. + void fill(size_t zero_pad_bytes) { + make_space(zero_pad_bytes); + for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0; + } + + // Version for when we know the size is larger. + void fill_big(size_t zero_pad_bytes) { + memset(make_space(zero_pad_bytes), 0, zero_pad_bytes); + } + + void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; } + void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; } + + void swap(vector_downward &other) { + using std::swap; + swap(allocator_, other.allocator_); + swap(own_allocator_, other.own_allocator_); + swap(initial_size_, other.initial_size_); + swap(buffer_minalign_, other.buffer_minalign_); + swap(reserved_, other.reserved_); + swap(buf_, other.buf_); + swap(cur_, other.cur_); + swap(scratch_, other.scratch_); + } + + void swap_allocator(vector_downward &other) { + using std::swap; + swap(allocator_, other.allocator_); + swap(own_allocator_, other.own_allocator_); + } + + private: + // You shouldn't really be copying instances of this class. + FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &)) + FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &)) + + Allocator *allocator_; + bool own_allocator_; + size_t initial_size_; + size_t buffer_minalign_; + size_t reserved_; + uint8_t *buf_; + uint8_t *cur_; // Points at location between empty (below) and used (above). + uint8_t *scratch_; // Points to the end of the scratchpad in use. 
+ + void reallocate(size_t len) { + auto old_reserved = reserved_; + auto old_size = size(); + auto old_scratch_size = scratch_size(); + reserved_ += (std::max)(len, + old_reserved ? old_reserved / 2 : initial_size_); + reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1); + if (buf_) { + buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_, + old_size, old_scratch_size); + } else { + buf_ = Allocate(allocator_, reserved_); + } + cur_ = buf_ + reserved_ - old_size; + scratch_ = buf_ + old_scratch_size; + } +}; + +// Converts a Field ID to a virtual table offset. +inline voffset_t FieldIndexToOffset(voffset_t field_id) { + // Should correspond to what EndTable() below builds up. + const int fixed_fields = 2; // Vtable size and Object Size. + return static_cast((field_id + fixed_fields) * sizeof(voffset_t)); +} + +template +const T *data(const std::vector &v) { + return v.empty() ? nullptr : &v.front(); +} +template T *data(std::vector &v) { + return v.empty() ? nullptr : &v.front(); +} + +/// @endcond + +/// @addtogroup flatbuffers_cpp_api +/// @{ +/// @class FlatBufferBuilder +/// @brief Helper class to hold data needed in creation of a FlatBuffer. +/// To serialize data, you typically call one of the `Create*()` functions in +/// the generated code, which in turn call a sequence of `StartTable`/ +/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/ +/// `CreateVector` functions. Do this is depth-first order to build up a tree to +/// the root. `Finish()` wraps up the buffer ready for transport. +class FlatBufferBuilder { + public: + /// @brief Default constructor for FlatBufferBuilder. + /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults + /// to `1024`. + /// @param[in] allocator An `Allocator` to use. If null will use + /// `DefaultAllocator`. + /// @param[in] own_allocator Whether the builder/vector should own the + /// allocator. Defaults to / `false`. 
+ /// @param[in] buffer_minalign Force the buffer to be aligned to the given + /// minimum alignment upon reallocation. Only needed if you intend to store + /// types with custom alignment AND you wish to read the buffer in-place + /// directly after creation. + explicit FlatBufferBuilder(size_t initial_size = 1024, + Allocator *allocator = nullptr, + bool own_allocator = false, + size_t buffer_minalign = + AlignOf()) + : buf_(initial_size, allocator, own_allocator, buffer_minalign), + num_field_loc(0), + max_voffset_(0), + nested(false), + finished(false), + minalign_(1), + force_defaults_(false), + dedup_vtables_(true), + string_pool(nullptr) { + EndianCheck(); + } + + // clang-format off + /// @brief Move constructor for FlatBufferBuilder. + #if !defined(FLATBUFFERS_CPP98_STL) + FlatBufferBuilder(FlatBufferBuilder &&other) + #else + FlatBufferBuilder(FlatBufferBuilder &other) + #endif // #if !defined(FLATBUFFERS_CPP98_STL) + : buf_(1024, nullptr, false, AlignOf()), + num_field_loc(0), + max_voffset_(0), + nested(false), + finished(false), + minalign_(1), + force_defaults_(false), + dedup_vtables_(true), + string_pool(nullptr) { + EndianCheck(); + // Default construct and swap idiom. + // Lack of delegating constructors in vs2010 makes it more verbose than needed. + Swap(other); + } + // clang-format on + + // clang-format off + #if !defined(FLATBUFFERS_CPP98_STL) + // clang-format on + /// @brief Move assignment operator for FlatBufferBuilder. 
+ FlatBufferBuilder &operator=(FlatBufferBuilder &&other) { + // Move construct a temporary and swap idiom + FlatBufferBuilder temp(std::move(other)); + Swap(temp); + return *this; + } + // clang-format off + #endif // defined(FLATBUFFERS_CPP98_STL) + // clang-format on + + void Swap(FlatBufferBuilder &other) { + using std::swap; + buf_.swap(other.buf_); + swap(num_field_loc, other.num_field_loc); + swap(max_voffset_, other.max_voffset_); + swap(nested, other.nested); + swap(finished, other.finished); + swap(minalign_, other.minalign_); + swap(force_defaults_, other.force_defaults_); + swap(dedup_vtables_, other.dedup_vtables_); + swap(string_pool, other.string_pool); + } + + ~FlatBufferBuilder() { + if (string_pool) delete string_pool; + } + + void Reset() { + Clear(); // clear builder state + buf_.reset(); // deallocate buffer + } + + /// @brief Reset all the state in this FlatBufferBuilder so it can be reused + /// to construct another buffer. + void Clear() { + ClearOffsets(); + buf_.clear(); + nested = false; + finished = false; + minalign_ = 1; + if (string_pool) string_pool->clear(); + } + + /// @brief The current size of the serialized buffer, counting from the end. + /// @return Returns an `uoffset_t` with the current size of the buffer. + uoffset_t GetSize() const { return buf_.size(); } + + /// @brief Get the serialized buffer (after you call `Finish()`). + /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the + /// buffer. + uint8_t *GetBufferPointer() const { + Finished(); + return buf_.data(); + } + + /// @brief Get a pointer to an unfinished buffer. + /// @return Returns a `uint8_t` pointer to the unfinished buffer. + uint8_t *GetCurrentBufferPointer() const { return buf_.data(); } + + /// @brief Get the released pointer to the serialized buffer. + /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards! 
+ /// @return A `FlatBuffer` that owns the buffer and its allocator and + /// behaves similar to a `unique_ptr` with a deleter. + FLATBUFFERS_ATTRIBUTE(deprecated("use Release() instead")) DetachedBuffer + ReleaseBufferPointer() { + Finished(); + return buf_.release(); + } + + /// @brief Get the released DetachedBuffer. + /// @return A `DetachedBuffer` that owns the buffer and its allocator. + DetachedBuffer Release() { + Finished(); + return buf_.release(); + } + + /// @brief Get the released pointer to the serialized buffer. + /// @param The size of the memory block containing + /// the serialized `FlatBuffer`. + /// @param The offset from the released pointer where the finished + /// `FlatBuffer` starts. + /// @return A raw pointer to the start of the memory block containing + /// the serialized `FlatBuffer`. + /// @remark If the allocator is owned, it gets deleted when the destructor is called.. + uint8_t *ReleaseRaw(size_t &size, size_t &offset) { + Finished(); + return buf_.release_raw(size, offset); + } + + /// @brief get the minimum alignment this buffer needs to be accessed + /// properly. This is only known once all elements have been written (after + /// you call Finish()). You can use this information if you need to embed + /// a FlatBuffer in some other buffer, such that you can later read it + /// without first having to copy it into its own buffer. + size_t GetBufferMinAlignment() { + Finished(); + return minalign_; + } + + /// @cond FLATBUFFERS_INTERNAL + void Finished() const { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call + // FlatBufferBuilder::Finish with your root table. + // If you really need to access an unfinished buffer, call + // GetCurrentBufferPointer instead. + FLATBUFFERS_ASSERT(finished); + } + /// @endcond + + /// @brief In order to save space, fields that are set to their default value + /// don't get serialized into the buffer. 
+ /// @param[in] bool fd When set to `true`, always serializes default values that are set. + /// Optional fields which are not set explicitly, will still not be serialized. + void ForceDefaults(bool fd) { force_defaults_ = fd; } + + /// @brief By default vtables are deduped in order to save space. + /// @param[in] bool dedup When set to `true`, dedup vtables. + void DedupVtables(bool dedup) { dedup_vtables_ = dedup; } + + /// @cond FLATBUFFERS_INTERNAL + void Pad(size_t num_bytes) { buf_.fill(num_bytes); } + + void TrackMinAlign(size_t elem_size) { + if (elem_size > minalign_) minalign_ = elem_size; + } + + void Align(size_t elem_size) { + TrackMinAlign(elem_size); + buf_.fill(PaddingBytes(buf_.size(), elem_size)); + } + + void PushFlatBuffer(const uint8_t *bytes, size_t size) { + PushBytes(bytes, size); + finished = true; + } + + void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); } + + void PopBytes(size_t amount) { buf_.pop(amount); } + + template void AssertScalarT() { + // The code assumes power of 2 sizes and endian-swap-ability. + static_assert(flatbuffers::is_scalar::value, "T must be a scalar type"); + } + + // Write a single aligned scalar to the buffer + template uoffset_t PushElement(T element) { + AssertScalarT(); + T litle_endian_element = EndianScalar(element); + Align(sizeof(T)); + buf_.push_small(litle_endian_element); + return GetSize(); + } + + template uoffset_t PushElement(Offset off) { + // Special case for offsets: see ReferTo below. + return PushElement(ReferTo(off.o)); + } + + // When writing fields, we track where they are, so we can create correct + // vtables later. + void TrackField(voffset_t field, uoffset_t off) { + FieldLoc fl = { off, field }; + buf_.scratch_push_small(fl); + num_field_loc++; + max_voffset_ = (std::max)(max_voffset_, field); + } + + // Like PushElement, but additionally tracks the field this represents. 
+ template void AddElement(voffset_t field, T e, T def) { + // We don't serialize values equal to the default. + if (IsTheSameAs(e, def) && !force_defaults_) return; + auto off = PushElement(e); + TrackField(field, off); + } + + template void AddOffset(voffset_t field, Offset off) { + if (off.IsNull()) return; // Don't store. + AddElement(field, ReferTo(off.o), static_cast(0)); + } + + template void AddStruct(voffset_t field, const T *structptr) { + if (!structptr) return; // Default, don't store. + Align(AlignOf()); + buf_.push_small(*structptr); + TrackField(field, GetSize()); + } + + void AddStructOffset(voffset_t field, uoffset_t off) { + TrackField(field, off); + } + + // Offsets initially are relative to the end of the buffer (downwards). + // This function converts them to be relative to the current location + // in the buffer (when stored here), pointing upwards. + uoffset_t ReferTo(uoffset_t off) { + // Align to ensure GetSize() below is correct. + Align(sizeof(uoffset_t)); + // Offset must refer to something already in buffer. + FLATBUFFERS_ASSERT(off && off <= GetSize()); + return GetSize() - off + static_cast(sizeof(uoffset_t)); + } + + void NotNested() { + // If you hit this, you're trying to construct a Table/Vector/String + // during the construction of its parent table (between the MyTableBuilder + // and table.Finish(). + // Move the creation of these sub-objects to above the MyTableBuilder to + // not get this assert. + // Ignoring this assert may appear to work in simple cases, but the reason + // it is here is that storing objects in-line may cause vtable offsets + // to not fit anymore. It also leads to vtable duplication. + FLATBUFFERS_ASSERT(!nested); + // If you hit this, fields were added outside the scope of a table. + FLATBUFFERS_ASSERT(!num_field_loc); + } + + // From generated code (or from the parser), we call StartTable/EndTable + // with a sequence of AddElement calls in between. 
+ uoffset_t StartTable() { + NotNested(); + nested = true; + return GetSize(); + } + + // This finishes one serialized object by generating the vtable if it's a + // table, comparing it against existing vtables, and writing the + // resulting vtable offset. + uoffset_t EndTable(uoffset_t start) { + // If you get this assert, a corresponding StartTable wasn't called. + FLATBUFFERS_ASSERT(nested); + // Write the vtable offset, which is the start of any Table. + // We fill it's value later. + auto vtableoffsetloc = PushElement(0); + // Write a vtable, which consists entirely of voffset_t elements. + // It starts with the number of offsets, followed by a type id, followed + // by the offsets themselves. In reverse: + // Include space for the last offset and ensure empty tables have a + // minimum size. + max_voffset_ = + (std::max)(static_cast(max_voffset_ + sizeof(voffset_t)), + FieldIndexToOffset(0)); + buf_.fill_big(max_voffset_); + auto table_object_size = vtableoffsetloc - start; + // Vtable use 16bit offsets. + FLATBUFFERS_ASSERT(table_object_size < 0x10000); + WriteScalar(buf_.data() + sizeof(voffset_t), + static_cast(table_object_size)); + WriteScalar(buf_.data(), max_voffset_); + // Write the offsets into the table + for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); + it < buf_.scratch_end(); it += sizeof(FieldLoc)) { + auto field_location = reinterpret_cast(it); + auto pos = static_cast(vtableoffsetloc - field_location->off); + // If this asserts, it means you've set a field twice. + FLATBUFFERS_ASSERT( + !ReadScalar(buf_.data() + field_location->id)); + WriteScalar(buf_.data() + field_location->id, pos); + } + ClearOffsets(); + auto vt1 = reinterpret_cast(buf_.data()); + auto vt1_size = ReadScalar(vt1); + auto vt_use = GetSize(); + // See if we already have generated a vtable with this exact same + // layout before. If so, make it point to the old one, remove this one. 
+ if (dedup_vtables_) { + for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); + it += sizeof(uoffset_t)) { + auto vt_offset_ptr = reinterpret_cast(it); + auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); + auto vt2_size = *vt2; + if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue; + vt_use = *vt_offset_ptr; + buf_.pop(GetSize() - vtableoffsetloc); + break; + } + } + // If this is a new vtable, remember it. + if (vt_use == GetSize()) { buf_.scratch_push_small(vt_use); } + // Fill the vtable offset we created above. + // The offset points from the beginning of the object to where the + // vtable is stored. + // Offsets default direction is downward in memory for future format + // flexibility (storing all vtables at the start of the file). + WriteScalar(buf_.data_at(vtableoffsetloc), + static_cast(vt_use) - + static_cast(vtableoffsetloc)); + + nested = false; + return vtableoffsetloc; + } + + FLATBUFFERS_ATTRIBUTE(deprecated("call the version above instead")) + uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) { + return EndTable(start); + } + + // This checks a required field has been set in a given table that has + // just been constructed. + template void Required(Offset table, voffset_t field); + + uoffset_t StartStruct(size_t alignment) { + Align(alignment); + return GetSize(); + } + + uoffset_t EndStruct() { return GetSize(); } + + void ClearOffsets() { + buf_.scratch_pop(num_field_loc * sizeof(FieldLoc)); + num_field_loc = 0; + max_voffset_ = 0; + } + + // Aligns such that when "len" bytes are written, an object can be written + // after it with "alignment" without padding. + void PreAlign(size_t len, size_t alignment) { + TrackMinAlign(alignment); + buf_.fill(PaddingBytes(GetSize() + len, alignment)); + } + template void PreAlign(size_t len) { + AssertScalarT(); + PreAlign(len, sizeof(T)); + } + /// @endcond + + /// @brief Store a string in the buffer, which can contain any binary data. 
+ /// @param[in] str A const char pointer to the data to be stored as a string. + /// @param[in] len The number of bytes that should be stored from `str`. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateString(const char *str, size_t len) { + NotNested(); + PreAlign(len + 1); // Always 0-terminated. + buf_.fill(1); + PushBytes(reinterpret_cast(str), len); + PushElement(static_cast(len)); + return Offset(GetSize()); + } + + /// @brief Store a string in the buffer, which is null-terminated. + /// @param[in] str A const char pointer to a C-string to add to the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateString(const char *str) { + return CreateString(str, strlen(str)); + } + + /// @brief Store a string in the buffer, which is null-terminated. + /// @param[in] str A char pointer to a C-string to add to the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateString(char *str) { + return CreateString(str, strlen(str)); + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const reference to a std::string to store in the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateString(const std::string &str) { + return CreateString(str.c_str(), str.length()); + } + + // clang-format off + #ifdef FLATBUFFERS_HAS_STRING_VIEW + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const string_view to copy in to the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateString(MindSpore.flatbuffers::string_view str) { + return CreateString(str.data(), str.size()); + } + #endif // FLATBUFFERS_HAS_STRING_VIEW + // clang-format on + + /// @brief Store a string in the buffer, which can contain any binary data. 
+ /// @param[in] str A const pointer to a `String` struct to add to the buffer. + /// @return Returns the offset in the buffer where the string starts + Offset CreateString(const String *str) { + return str ? CreateString(str->c_str(), str->size()) : 0; + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// @param[in] str A const reference to a std::string like type with support + /// of T::c_str() and T::length() to store in the buffer. + /// @return Returns the offset in the buffer where the string starts. + template Offset CreateString(const T &str) { + return CreateString(str.c_str(), str.length()); + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// If a string with this exact contents has already been serialized before, + /// instead simply returns the offset of the existing string. + /// @param[in] str A const char pointer to the data to be stored as a string. + /// @param[in] len The number of bytes that should be stored from `str`. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateSharedString(const char *str, size_t len) { + if (!string_pool) + string_pool = new StringOffsetMap(StringOffsetCompare(buf_)); + auto size_before_string = buf_.size(); + // Must first serialize the string, since the set is all offsets into + // buffer. + auto off = CreateString(str, len); + auto it = string_pool->find(off); + // If it exists we reuse existing serialized data! + if (it != string_pool->end()) { + // We can remove the string we serialized. + buf_.pop(buf_.size() - size_before_string); + return *it; + } + // Record this string for future use. + string_pool->insert(off); + return off; + } + + /// @brief Store a string in the buffer, which null-terminated. + /// If a string with this exact contents has already been serialized before, + /// instead simply returns the offset of the existing string. 
+ /// @param[in] str A const char pointer to a C-string to add to the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateSharedString(const char *str) { + return CreateSharedString(str, strlen(str)); + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// If a string with this exact contents has already been serialized before, + /// instead simply returns the offset of the existing string. + /// @param[in] str A const reference to a std::string to store in the buffer. + /// @return Returns the offset in the buffer where the string starts. + Offset CreateSharedString(const std::string &str) { + return CreateSharedString(str.c_str(), str.length()); + } + + /// @brief Store a string in the buffer, which can contain any binary data. + /// If a string with this exact contents has already been serialized before, + /// instead simply returns the offset of the existing string. + /// @param[in] str A const pointer to a `String` struct to add to the buffer. + /// @return Returns the offset in the buffer where the string starts + Offset CreateSharedString(const String *str) { + return CreateSharedString(str->c_str(), str->size()); + } + + /// @cond FLATBUFFERS_INTERNAL + uoffset_t EndVector(size_t len) { + FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector. + nested = false; + return PushElement(static_cast(len)); + } + + void StartVector(size_t len, size_t elemsize) { + NotNested(); + nested = true; + PreAlign(len * elemsize); + PreAlign(len * elemsize, elemsize); // Just in case elemsize > uoffset_t. + } + + // Call this right before StartVector/CreateVector if you want to force the + // alignment to be something different than what the element size would + // normally dictate. + // This is useful when storing a nested_flatbuffer in a vector of bytes, + // or when storing SIMD floats, etc. 
+ void ForceVectorAlignment(size_t len, size_t elemsize, size_t alignment) { + PreAlign(len * elemsize, alignment); + } + + // Similar to ForceVectorAlignment but for String fields. + void ForceStringAlignment(size_t len, size_t alignment) { + PreAlign((len + 1) * sizeof(char), alignment); + } + + /// @endcond + + /// @brief Serialize an array into a FlatBuffer `vector`. + /// @tparam T The data type of the array elements. + /// @param[in] v A pointer to the array of type `T` to serialize into the + /// buffer as a `vector`. + /// @param[in] len The number of elements to serialize. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template Offset> CreateVector(const T *v, size_t len) { + // If this assert hits, you're specifying a template argument that is + // causing the wrong overload to be selected, remove it. + AssertScalarT(); + StartVector(len, sizeof(T)); + // clang-format off + #if FLATBUFFERS_LITTLEENDIAN + PushBytes(reinterpret_cast(v), len * sizeof(T)); + #else + if (sizeof(T) == 1) { + PushBytes(reinterpret_cast(v), len); + } else { + for (auto i = len; i > 0; ) { + PushElement(v[--i]); + } + } + #endif + // clang-format on + return Offset>(EndVector(len)); + } + + template + Offset>> CreateVector(const Offset *v, size_t len) { + StartVector(len, sizeof(Offset)); + for (auto i = len; i > 0;) { PushElement(v[--i]); } + return Offset>>(EndVector(len)); + } + + /// @brief Serialize a `std::vector` into a FlatBuffer `vector`. + /// @tparam T The data type of the `std::vector` elements. + /// @param v A const reference to the `std::vector` to serialize into the + /// buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template Offset> CreateVector(const std::vector &v) { + return CreateVector(data(v), v.size()); + } + + // vector may be implemented using a bit-set, so we can't access it as + // an array. 
Instead, read elements manually. + // Background: https://isocpp.org/blog/2012/11/on-vectorbool + Offset> CreateVector(const std::vector &v) { + StartVector(v.size(), sizeof(uint8_t)); + for (auto i = v.size(); i > 0;) { + PushElement(static_cast(v[--i])); + } + return Offset>(EndVector(v.size())); + } + + // clang-format off + #ifndef FLATBUFFERS_CPP98_STL + /// @brief Serialize values returned by a function into a FlatBuffer `vector`. + /// This is a convenience function that takes care of iteration for you. + /// @tparam T The data type of the `std::vector` elements. + /// @param f A function that takes the current iteration 0..vector_size-1 and + /// returns any type that you can construct a FlatBuffers vector out of. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template Offset> CreateVector(size_t vector_size, + const std::function &f) { + std::vector elems(vector_size); + for (size_t i = 0; i < vector_size; i++) elems[i] = f(i); + return CreateVector(elems); + } + #endif + // clang-format on + + /// @brief Serialize values returned by a function into a FlatBuffer `vector`. + /// This is a convenience function that takes care of iteration for you. + /// @tparam T The data type of the `std::vector` elements. + /// @param f A function that takes the current iteration 0..vector_size-1, + /// and the state parameter returning any type that you can construct a + /// FlatBuffers vector out of. + /// @param state State passed to f. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVector(size_t vector_size, F f, S *state) { + std::vector elems(vector_size); + for (size_t i = 0; i < vector_size; i++) elems[i] = f(i, state); + return CreateVector(elems); + } + + /// @brief Serialize a `std::vector` into a FlatBuffer `vector`. + /// This is a convenience function for a common case. 
+ /// @param v A const reference to the `std::vector` to serialize into the + /// buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + Offset>> CreateVectorOfStrings( + const std::vector &v) { + std::vector> offsets(v.size()); + for (size_t i = 0; i < v.size(); i++) offsets[i] = CreateString(v[i]); + return CreateVector(offsets); + } + + /// @brief Serialize an array of structs into a FlatBuffer `vector`. + /// @tparam T The data type of the struct array elements. + /// @param[in] v A pointer to the array of type `T` to serialize into the + /// buffer as a `vector`. + /// @param[in] len The number of elements to serialize. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVectorOfStructs(const T *v, size_t len) { + StartVector(len * sizeof(T) / AlignOf(), AlignOf()); + PushBytes(reinterpret_cast(v), sizeof(T) * len); + return Offset>(EndVector(len)); + } + + /// @brief Serialize an array of native structs into a FlatBuffer `vector`. + /// @tparam T The data type of the struct array elements. + /// @tparam S The data type of the native struct array elements. + /// @param[in] v A pointer to the array of type `S` to serialize into the + /// buffer as a `vector`. + /// @param[in] len The number of elements to serialize. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVectorOfNativeStructs(const S *v, + size_t len) { + extern T Pack(const S &); + typedef T (*Pack_t)(const S &); + std::vector vv(len); + std::transform(v, v + len, vv.begin(), static_cast(Pack)); + return CreateVectorOfStructs(vv.data(), vv.size()); + } + + // clang-format off + #ifndef FLATBUFFERS_CPP98_STL + /// @brief Serialize an array of structs into a FlatBuffer `vector`. + /// @tparam T The data type of the struct array elements. 
+ /// @param[in] f A function that takes the current iteration 0..vector_size-1 + /// and a pointer to the struct that must be filled. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + /// This is mostly useful when MindSpore.flatbuffers are generated with mutation + /// accessors. + template Offset> CreateVectorOfStructs( + size_t vector_size, const std::function &filler) { + T* structs = StartVectorOfStructs(vector_size); + for (size_t i = 0; i < vector_size; i++) { + filler(i, structs); + structs++; + } + return EndVectorOfStructs(vector_size); + } + #endif + // clang-format on + + /// @brief Serialize an array of structs into a FlatBuffer `vector`. + /// @tparam T The data type of the struct array elements. + /// @param[in] f A function that takes the current iteration 0..vector_size-1, + /// a pointer to the struct that must be filled and the state argument. + /// @param[in] state Arbitrary state to pass to f. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + /// This is mostly useful when MindSpore.flatbuffers are generated with mutation + /// accessors. + template + Offset> CreateVectorOfStructs(size_t vector_size, F f, + S *state) { + T *structs = StartVectorOfStructs(vector_size); + for (size_t i = 0; i < vector_size; i++) { + f(i, structs, state); + structs++; + } + return EndVectorOfStructs(vector_size); + } + + /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`. + /// @tparam T The data type of the `std::vector` struct elements. + /// @param[in]] v A const reference to the `std::vector` of structs to + /// serialize into the buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. 
+ template + Offset> CreateVectorOfStructs( + const std::vector &v) { + return CreateVectorOfStructs(data(v), v.size()); + } + + /// @brief Serialize a `std::vector` of native structs into a FlatBuffer + /// `vector`. + /// @tparam T The data type of the `std::vector` struct elements. + /// @tparam S The data type of the `std::vector` native struct elements. + /// @param[in]] v A const reference to the `std::vector` of structs to + /// serialize into the buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVectorOfNativeStructs( + const std::vector &v) { + return CreateVectorOfNativeStructs(data(v), v.size()); + } + + /// @cond FLATBUFFERS_INTERNAL + template struct StructKeyComparator { + bool operator()(const T &a, const T &b) const { + return a.KeyCompareLessThan(&b); + } + + private: + StructKeyComparator &operator=(const StructKeyComparator &); + }; + /// @endcond + + /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector` + /// in sorted order. + /// @tparam T The data type of the `std::vector` struct elements. + /// @param[in]] v A const reference to the `std::vector` of structs to + /// serialize into the buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVectorOfSortedStructs(std::vector *v) { + return CreateVectorOfSortedStructs(data(*v), v->size()); + } + + /// @brief Serialize a `std::vector` of native structs into a FlatBuffer + /// `vector` in sorted order. + /// @tparam T The data type of the `std::vector` struct elements. + /// @tparam S The data type of the `std::vector` native struct elements. + /// @param[in]] v A const reference to the `std::vector` of structs to + /// serialize into the buffer as a `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. 
+ template + Offset> CreateVectorOfSortedNativeStructs( + std::vector *v) { + return CreateVectorOfSortedNativeStructs(data(*v), v->size()); + } + + /// @brief Serialize an array of structs into a FlatBuffer `vector` in sorted + /// order. + /// @tparam T The data type of the struct array elements. + /// @param[in] v A pointer to the array of type `T` to serialize into the + /// buffer as a `vector`. + /// @param[in] len The number of elements to serialize. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset> CreateVectorOfSortedStructs(T *v, size_t len) { + std::sort(v, v + len, StructKeyComparator()); + return CreateVectorOfStructs(v, len); + } + + /// @brief Serialize an array of native structs into a FlatBuffer `vector` in + /// sorted order. + /// @tparam T The data type of the struct array elements. + /// @tparam S The data type of the native struct array elements. + /// @param[in] v A pointer to the array of type `S` to serialize into the + /// buffer as a `vector`. + /// @param[in] len The number of elements to serialize. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. 
+ template + Offset> CreateVectorOfSortedNativeStructs(S *v, + size_t len) { + extern T Pack(const S &); + typedef T (*Pack_t)(const S &); + std::vector vv(len); + std::transform(v, v + len, vv.begin(), static_cast(Pack)); + return CreateVectorOfSortedStructs(vv, len); + } + + /// @cond FLATBUFFERS_INTERNAL + template struct TableKeyComparator { + TableKeyComparator(vector_downward &buf) : buf_(buf) {} + bool operator()(const Offset &a, const Offset &b) const { + auto table_a = reinterpret_cast(buf_.data_at(a.o)); + auto table_b = reinterpret_cast(buf_.data_at(b.o)); + return table_a->KeyCompareLessThan(table_b); + } + vector_downward &buf_; + + private: + TableKeyComparator &operator=(const TableKeyComparator &); + }; + /// @endcond + + /// @brief Serialize an array of `table` offsets as a `vector` in the buffer + /// in sorted order. + /// @tparam T The data type that the offset refers to. + /// @param[in] v An array of type `Offset` that contains the `table` + /// offsets to store in the buffer in sorted order. + /// @param[in] len The number of elements to store in the `vector`. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset>> CreateVectorOfSortedTables(Offset *v, + size_t len) { + std::sort(v, v + len, TableKeyComparator(buf_)); + return CreateVector(v, len); + } + + /// @brief Serialize an array of `table` offsets as a `vector` in the buffer + /// in sorted order. + /// @tparam T The data type that the offset refers to. + /// @param[in] v An array of type `Offset` that contains the `table` + /// offsets to store in the buffer in sorted order. + /// @return Returns a typed `Offset` into the serialized data indicating + /// where the vector is stored. + template + Offset>> CreateVectorOfSortedTables( + std::vector> *v) { + return CreateVectorOfSortedTables(data(*v), v->size()); + } + + /// @brief Specialized version of `CreateVector` for non-copying use cases. 
+ /// Write the data any time later to the returned buffer pointer `buf`. + /// @param[in] len The number of elements to store in the `vector`. + /// @param[in] elemsize The size of each element in the `vector`. + /// @param[out] buf A pointer to a `uint8_t` pointer that can be + /// written to at a later time to serialize the data into a `vector` + /// in the buffer. + uoffset_t CreateUninitializedVector(size_t len, size_t elemsize, + uint8_t **buf) { + NotNested(); + StartVector(len, elemsize); + buf_.make_space(len * elemsize); + auto vec_start = GetSize(); + auto vec_end = EndVector(len); + *buf = buf_.data_at(vec_start); + return vec_end; + } + + /// @brief Specialized version of `CreateVector` for non-copying use cases. + /// Write the data any time later to the returned buffer pointer `buf`. + /// @tparam T The data type of the data that will be stored in the buffer + /// as a `vector`. + /// @param[in] len The number of elements to store in the `vector`. + /// @param[out] buf A pointer to a pointer of type `T` that can be + /// written to at a later time to serialize the data into a `vector` + /// in the buffer. + template + Offset> CreateUninitializedVector(size_t len, T **buf) { + AssertScalarT(); + return CreateUninitializedVector(len, sizeof(T), + reinterpret_cast(buf)); + } + + template + Offset> CreateUninitializedVectorOfStructs(size_t len, T **buf) { + return CreateUninitializedVector(len, sizeof(T), + reinterpret_cast(buf)); + } + + + // @brief Create a vector of scalar type T given as input a vector of scalar + // type U, useful with e.g. pre "enum class" enums, or any existing scalar + // data of the wrong type. + template + Offset> CreateVectorScalarCast(const U *v, size_t len) { + AssertScalarT(); + AssertScalarT(); + StartVector(len, sizeof(T)); + for (auto i = len; i > 0;) { PushElement(static_cast(v[--i])); } + return Offset>(EndVector(len)); + } + + /// @brief Write a struct by itself, typically to be part of a union. 
+ template Offset CreateStruct(const T &structobj) { + NotNested(); + Align(AlignOf()); + buf_.push_small(structobj); + return Offset(GetSize()); + } + + /// @brief The length of a FlatBuffer file header. + static const size_t kFileIdentifierLength = 4; + + /// @brief Finish serializing a buffer by writing the root offset. + /// @param[in] file_identifier If a `file_identifier` is given, the buffer + /// will be prefixed with a standard FlatBuffers file header. + template + void Finish(Offset root, const char *file_identifier = nullptr) { + Finish(root.o, file_identifier, false); + } + + /// @brief Finish a buffer with a 32 bit size field pre-fixed (size of the + /// buffer following the size field). These buffers are NOT compatible + /// with standard buffers created by Finish, i.e. you can't call GetRoot + /// on them, you have to use GetSizePrefixedRoot instead. + /// All >32 bit quantities in this buffer will be aligned when the whole + /// size pre-fixed buffer is aligned. + /// These kinds of buffers are useful for creating a stream of FlatBuffers. + template + void FinishSizePrefixed(Offset root, + const char *file_identifier = nullptr) { + Finish(root.o, file_identifier, true); + } + + void SwapBufAllocator(FlatBufferBuilder &other) { + buf_.swap_allocator(other.buf_); + } + +protected: + + // You shouldn't really be copying instances of this class. + FlatBufferBuilder(const FlatBufferBuilder &); + FlatBufferBuilder &operator=(const FlatBufferBuilder &); + + void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) { + NotNested(); + buf_.clear_scratch(); + // This will cause the whole buffer to be aligned. + PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) + + (file_identifier ? 
kFileIdentifierLength : 0), + minalign_); + if (file_identifier) { + FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength); + PushBytes(reinterpret_cast(file_identifier), + kFileIdentifierLength); + } + PushElement(ReferTo(root)); // Location of root. + if (size_prefix) { PushElement(GetSize()); } + finished = true; + } + + struct FieldLoc { + uoffset_t off; + voffset_t id; + }; + + vector_downward buf_; + + // Accumulating offsets of table members while it is being built. + // We store these in the scratch pad of buf_, after the vtable offsets. + uoffset_t num_field_loc; + // Track how much of the vtable is in use, so we can output the most compact + // possible vtable. + voffset_t max_voffset_; + + // Ensure objects are not nested. + bool nested; + + // Ensure the buffer is finished before it is being accessed. + bool finished; + + size_t minalign_; + + bool force_defaults_; // Serialize values equal to their defaults anyway. + + bool dedup_vtables_; + + struct StringOffsetCompare { + StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {} + bool operator()(const Offset &a, const Offset &b) const { + auto stra = reinterpret_cast(buf_->data_at(a.o)); + auto strb = reinterpret_cast(buf_->data_at(b.o)); + return StringLessThan(stra->data(), stra->size(), + strb->data(), strb->size()); + } + const vector_downward *buf_; + }; + + // For use with CreateSharedString. Instantiated on first use only. + typedef std::set, StringOffsetCompare> StringOffsetMap; + StringOffsetMap *string_pool; + + private: + // Allocates space for a vector of structures. + // Must be completed with EndVectorOfStructs(). + template T *StartVectorOfStructs(size_t vector_size) { + StartVector(vector_size * sizeof(T) / AlignOf(), AlignOf()); + return reinterpret_cast(buf_.make_space(vector_size * sizeof(T))); + } + + // End the vector of structues in the MindSpore.flatbuffers. + // Vector should have previously be started with StartVectorOfStructs(). 
+ template + Offset> EndVectorOfStructs(size_t vector_size) { + return Offset>(EndVector(vector_size)); + } +}; +/// @} + +/// @cond FLATBUFFERS_INTERNAL +// Helpers to get a typed pointer to the root object contained in the buffer. +template T *GetMutableRoot(void *buf) { + EndianCheck(); + return reinterpret_cast( + reinterpret_cast(buf) + + EndianScalar(*reinterpret_cast(buf))); +} + +template const T *GetRoot(const void *buf) { + return GetMutableRoot(const_cast(buf)); +} + +template const T *GetSizePrefixedRoot(const void *buf) { + return GetRoot(reinterpret_cast(buf) + sizeof(uoffset_t)); +} + +/// Helpers to get a typed pointer to objects that are currently being built. +/// @warning Creating new objects will lead to reallocations and invalidates +/// the pointer! +template +T *GetMutableTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { + return reinterpret_cast(fbb.GetCurrentBufferPointer() + fbb.GetSize() - + offset.o); +} + +template +const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { + return GetMutableTemporaryPointer(fbb, offset); +} + +/// @brief Get a pointer to the the file_identifier section of the buffer. +/// @return Returns a const char pointer to the start of the file_identifier +/// characters in the buffer. The returned char * has length +/// 'MindSpore.flatbuffers::FlatBufferBuilder::kFileIdentifierLength'. +/// This function is UNDEFINED for FlatBuffers whose MindSpore.schema does not include +/// a file_identifier (likely points at padding or the start of a the root +/// vtable). +inline const char *GetBufferIdentifier(const void *buf, bool size_prefixed = false) { + return reinterpret_cast(buf) + + ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t)); +} + +// Helper to see if the identifier in a buffer has the expected value. 
+inline bool BufferHasIdentifier(const void *buf, const char *identifier, bool size_prefixed = false) { + return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier, + FlatBufferBuilder::kFileIdentifierLength) == 0; +} + +// Helper class to verify the integrity of a FlatBuffer +class Verifier FLATBUFFERS_FINAL_CLASS { + public: + Verifier(const uint8_t *buf, size_t buf_len, uoffset_t _max_depth = 64, + uoffset_t _max_tables = 1000000, bool _check_alignment = true) + : buf_(buf), + size_(buf_len), + depth_(0), + max_depth_(_max_depth), + num_tables_(0), + max_tables_(_max_tables), + upper_bound_(0), + check_alignment_(_check_alignment) + { + FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE); + } + + // Central location where any verification failures register. + bool Check(bool ok) const { + // clang-format off + #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE + FLATBUFFERS_ASSERT(ok); + #endif + #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE + if (!ok) + upper_bound_ = 0; + #endif + // clang-format on + return ok; + } + + // Verify any range within the buffer. + bool Verify(size_t elem, size_t elem_len) const { + // clang-format off + #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE + auto upper_bound = elem + elem_len; + if (upper_bound_ < upper_bound) + upper_bound_ = upper_bound; + #endif + // clang-format on + return Check(elem_len < size_ && elem <= size_ - elem_len); + } + + template bool VerifyAlignment(size_t elem) const { + return (elem & (sizeof(T) - 1)) == 0 || !check_alignment_; + } + + // Verify a range indicated by sizeof(T). + template bool Verify(size_t elem) const { + return VerifyAlignment(elem) && Verify(elem, sizeof(T)); + } + + // Verify relative to a known-good base pointer. 
+ bool Verify(const uint8_t *base, voffset_t elem_off, size_t elem_len) const { + return Verify(static_cast(base - buf_) + elem_off, elem_len); + } + + template bool Verify(const uint8_t *base, voffset_t elem_off) + const { + return Verify(static_cast(base - buf_) + elem_off, sizeof(T)); + } + + // Verify a pointer (may be NULL) of a table type. + template bool VerifyTable(const T *table) { + return !table || table->Verify(*this); + } + + // Verify a pointer (may be NULL) of any vector type. + template bool VerifyVector(const Vector *vec) const { + return !vec || VerifyVectorOrString(reinterpret_cast(vec), + sizeof(T)); + } + + // Verify a pointer (may be NULL) of a vector to struct. + template bool VerifyVector(const Vector *vec) const { + return VerifyVector(reinterpret_cast *>(vec)); + } + + // Verify a pointer (may be NULL) to string. + bool VerifyString(const String *str) const { + size_t end; + return !str || + (VerifyVectorOrString(reinterpret_cast(str), + 1, &end) && + Verify(end, 1) && // Must have terminator + Check(buf_[end] == '\0')); // Terminating byte must be 0. + } + + // Common code between vectors and strings. + bool VerifyVectorOrString(const uint8_t *vec, size_t elem_size, + size_t *end = nullptr) const { + auto veco = static_cast(vec - buf_); + // Check we can read the size field. + if (!Verify(veco)) return false; + // Check the whole array. If this is a string, the byte past the array + // must be 0. + auto size = ReadScalar(vec); + auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size; + if (!Check(size < max_elems)) + return false; // Protect against byte_size overflowing. + auto byte_size = sizeof(size) + elem_size * size; + if (end) *end = veco + byte_size; + return Verify(veco, byte_size); + } + + // Special case for string contents, after the above has been called. 
+ bool VerifyVectorOfStrings(const Vector> *vec) const { + if (vec) { + for (uoffset_t i = 0; i < vec->size(); i++) { + if (!VerifyString(vec->Get(i))) return false; + } + } + return true; + } + + // Special case for table contents, after the above has been called. + template bool VerifyVectorOfTables(const Vector> *vec) { + if (vec) { + for (uoffset_t i = 0; i < vec->size(); i++) { + if (!vec->Get(i)->Verify(*this)) return false; + } + } + return true; + } + + bool VerifyTableStart(const uint8_t *table) { + // Check the vtable offset. + auto tableo = static_cast(table - buf_); + if (!Verify(tableo)) return false; + // This offset may be signed, but doing the substraction unsigned always + // gives the result we want. + auto vtableo = tableo - static_cast(ReadScalar(table)); + // Check the vtable size field, then check vtable fits in its entirety. + return VerifyComplexity() && Verify(vtableo) && + VerifyAlignment(ReadScalar(buf_ + vtableo)) && + Verify(vtableo, ReadScalar(buf_ + vtableo)); + } + + template + bool VerifyBufferFromStart(const char *identifier, size_t start) { + if (identifier && + (size_ < 2 * sizeof(flatbuffers::uoffset_t) || + !BufferHasIdentifier(buf_ + start, identifier))) { + return false; + } + + // Call T::Verify, which must be in the generated code for this type. + auto o = VerifyOffset(start); + return o && reinterpret_cast(buf_ + start + o)->Verify(*this) + // clang-format off + #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE + && GetComputedSize() + #endif + ; + // clang-format on + } + + // Verify this whole buffer, starting with root type T. 
+ template bool VerifyBuffer() { return VerifyBuffer(nullptr); } + + template bool VerifyBuffer(const char *identifier) { + return VerifyBufferFromStart(identifier, 0); + } + + template bool VerifySizePrefixedBuffer(const char *identifier) { + return Verify(0U) && + ReadScalar(buf_) == size_ - sizeof(uoffset_t) && + VerifyBufferFromStart(identifier, sizeof(uoffset_t)); + } + + uoffset_t VerifyOffset(size_t start) const { + if (!Verify(start)) return 0; + auto o = ReadScalar(buf_ + start); + // May not point to itself. + if (!Check(o != 0)) return 0; + // Can't wrap around / buffers are max 2GB. + if (!Check(static_cast(o) >= 0)) return 0; + // Must be inside the buffer to create a pointer from it (pointer outside + // buffer is UB). + if (!Verify(start + o, 1)) return 0; + return o; + } + + uoffset_t VerifyOffset(const uint8_t *base, voffset_t start) const { + return VerifyOffset(static_cast(base - buf_) + start); + } + + // Called at the start of a table to increase counters measuring data + // structure depth and amount, and possibly bails out with false if + // limits set by the constructor have been hit. Needs to be balanced + // with EndTable(). + bool VerifyComplexity() { + depth_++; + num_tables_++; + return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_); + } + + // Called at the end of a table to pop the depth count. + bool EndTable() { + depth_--; + return true; + } + + // Returns the message size in bytes + size_t GetComputedSize() const { + // clang-format off + #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE + uintptr_t size = upper_bound_; + // Align the size to uoffset_t + size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1); + return (size > size_) ? 0 : size; + #else + // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work. 
+ (void)upper_bound_; + FLATBUFFERS_ASSERT(false); + return 0; + #endif + // clang-format on + } + + private: + const uint8_t *buf_; + size_t size_; + uoffset_t depth_; + uoffset_t max_depth_; + uoffset_t num_tables_; + uoffset_t max_tables_; + mutable size_t upper_bound_; + bool check_alignment_; +}; + +// Convenient way to bundle a buffer and its length, to pass it around +// typed by its root. +// A BufferRef does not own its buffer. +struct BufferRefBase {}; // for std::is_base_of +template struct BufferRef : BufferRefBase { + BufferRef() : buf(nullptr), len(0), must_free(false) {} + BufferRef(uint8_t *_buf, uoffset_t _len) + : buf(_buf), len(_len), must_free(false) {} + + ~BufferRef() { + if (must_free) free(buf); + } + + const T *GetRoot() const { return flatbuffers::GetRoot(buf); } + + bool Verify() { + Verifier verifier(buf, len); + return verifier.VerifyBuffer(nullptr); + } + + uint8_t *buf; + uoffset_t len; + bool must_free; +}; + +// "structs" are flat structures that do not have an offset table, thus +// always have all members present and do not support forwards/backwards +// compatible extensions. + +class Struct FLATBUFFERS_FINAL_CLASS { + public: + template T GetField(uoffset_t o) const { + return ReadScalar(&data_[o]); + } + + template T GetStruct(uoffset_t o) const { + return reinterpret_cast(&data_[o]); + } + + const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; } + uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; } + + private: + uint8_t data_[1]; +}; + +// "tables" use an offset table (possibly shared) that allows fields to be +// omitted and added at will, but uses an extra indirection to read. +class Table { + public: + const uint8_t *GetVTable() const { + return data_ - ReadScalar(data_); + } + + // This gets the field offset for any of the functions below it, or 0 + // if the field was not present. + voffset_t GetOptionalFieldOffset(voffset_t field) const { + // The vtable offset is always at the start. 
+ auto vtable = GetVTable(); + // The first element is the size of the vtable (fields + type id + itself). + auto vtsize = ReadScalar(vtable); + // If the field we're accessing is outside the vtable, we're reading older + // data, so it's the same as if the offset was 0 (not present). + return field < vtsize ? ReadScalar(vtable + field) : 0; + } + + template T GetField(voffset_t field, T defaultval) const { + auto field_offset = GetOptionalFieldOffset(field); + return field_offset ? ReadScalar(data_ + field_offset) : defaultval; + } + + template P GetPointer(voffset_t field) { + auto field_offset = GetOptionalFieldOffset(field); + auto p = data_ + field_offset; + return field_offset ? reinterpret_cast

(p + ReadScalar(p)) + : nullptr; + } + template P GetPointer(voffset_t field) const { + return const_cast(this)->GetPointer

(field); + } + + template P GetStruct(voffset_t field) const { + auto field_offset = GetOptionalFieldOffset(field); + auto p = const_cast(data_ + field_offset); + return field_offset ? reinterpret_cast

(p) : nullptr; + } + + template bool SetField(voffset_t field, T val, T def) { + auto field_offset = GetOptionalFieldOffset(field); + if (!field_offset) return IsTheSameAs(val, def); + WriteScalar(data_ + field_offset, val); + return true; + } + + bool SetPointer(voffset_t field, const uint8_t *val) { + auto field_offset = GetOptionalFieldOffset(field); + if (!field_offset) return false; + WriteScalar(data_ + field_offset, + static_cast(val - (data_ + field_offset))); + return true; + } + + uint8_t *GetAddressOf(voffset_t field) { + auto field_offset = GetOptionalFieldOffset(field); + return field_offset ? data_ + field_offset : nullptr; + } + const uint8_t *GetAddressOf(voffset_t field) const { + return const_cast

(this)->GetAddressOf(field); + } + + bool CheckField(voffset_t field) const { + return GetOptionalFieldOffset(field) != 0; + } + + // Verify the vtable of this table. + // Call this once per table, followed by VerifyField once per field. + bool VerifyTableStart(Verifier &verifier) const { + return verifier.VerifyTableStart(data_); + } + + // Verify a particular field. + template + bool VerifyField(const Verifier &verifier, voffset_t field) const { + // Calling GetOptionalFieldOffset should be safe now thanks to + // VerifyTable(). + auto field_offset = GetOptionalFieldOffset(field); + // Check the actual field. + return !field_offset || verifier.Verify(data_, field_offset); + } + + // VerifyField for required fields. + template + bool VerifyFieldRequired(const Verifier &verifier, voffset_t field) const { + auto field_offset = GetOptionalFieldOffset(field); + return verifier.Check(field_offset != 0) && + verifier.Verify(data_, field_offset); + } + + // Versions for offsets. + bool VerifyOffset(const Verifier &verifier, voffset_t field) const { + auto field_offset = GetOptionalFieldOffset(field); + return !field_offset || verifier.VerifyOffset(data_, field_offset); + } + + bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const { + auto field_offset = GetOptionalFieldOffset(field); + return verifier.Check(field_offset != 0) && + verifier.VerifyOffset(data_, field_offset); + } + + private: + // private constructor & copy constructor: you obtain instances of this + // class by pointing to existing data only + Table(); + Table(const Table &other); + + uint8_t data_[1]; +}; + +template void FlatBufferBuilder::Required(Offset table, + voffset_t field) { + auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); + bool ok = table_ptr->GetOptionalFieldOffset(field) != 0; + // If this fails, the caller will show what field needs to be set. 
+ FLATBUFFERS_ASSERT(ok); + (void)ok; +} + +/// @brief This can compute the start of a FlatBuffer from a root pointer, i.e. +/// it is the opposite transformation of GetRoot(). +/// This may be useful if you want to pass on a root and have the recipient +/// delete the buffer afterwards. +inline const uint8_t *GetBufferStartFromRootPointer(const void *root) { + auto table = reinterpret_cast(root); + auto vtable = table->GetVTable(); + // Either the vtable is before the root or after the root. + auto start = (std::min)(vtable, reinterpret_cast(root)); + // Align to at least sizeof(uoffset_t). + start = reinterpret_cast(reinterpret_cast(start) & + ~(sizeof(uoffset_t) - 1)); + // Additionally, there may be a file_identifier in the buffer, and the root + // offset. The buffer may have been aligned to any size between + // sizeof(uoffset_t) and FLATBUFFERS_MAX_ALIGNMENT (see "force_align"). + // Sadly, the exact alignment is only known when constructing the buffer, + // since it depends on the presence of values with said alignment properties. + // So instead, we simply look at the next uoffset_t values (root, + // file_identifier, and alignment padding) to see which points to the root. + // None of the other values can "impersonate" the root since they will either + // be 0 or four ASCII characters. + static_assert(FlatBufferBuilder::kFileIdentifierLength == sizeof(uoffset_t), + "file_identifier is assumed to be the same size as uoffset_t"); + for (auto possible_roots = FLATBUFFERS_MAX_ALIGNMENT / sizeof(uoffset_t) + 1; + possible_roots; possible_roots--) { + start -= sizeof(uoffset_t); + if (ReadScalar(start) + start == + reinterpret_cast(root)) + return start; + } + // We didn't find the root, either the "root" passed isn't really a root, + // or the buffer is corrupt. + // Assert, because calling this function with bad data may cause reads + // outside of buffer boundaries. 
+ FLATBUFFERS_ASSERT(false); + return nullptr; +} + +/// @brief This return the prefixed size of a FlatBuffer. +inline uoffset_t GetPrefixedSize(const uint8_t* buf){ return ReadScalar(buf); } + +// Base class for native objects (FlatBuffer data de-serialized into native +// C++ data structures). +// Contains no functionality, purely documentative. +struct NativeTable {}; + +/// @brief Function types to be used with resolving hashes into objects and +/// back again. The resolver gets a pointer to a field inside an object API +/// object that is of the type specified in the MindSpore.schema using the attribute +/// `cpp_type` (it is thus important whatever you write to this address +/// matches that type). The value of this field is initially null, so you +/// may choose to implement a delayed binding lookup using this function +/// if you wish. The resolver does the opposite lookup, for when the object +/// is being serialized again. +typedef uint64_t hash_value_t; +// clang-format off +#ifdef FLATBUFFERS_CPP98_STL + typedef void (*resolver_function_t)(void **pointer_adr, hash_value_t hash); + typedef hash_value_t (*rehasher_function_t)(void *pointer); +#else + typedef std::function + resolver_function_t; + typedef std::function rehasher_function_t; +#endif +// clang-format on + +// Helper function to test if a field is present, using any of the field +// enums in the generated code. +// `table` must be a generated table type. Since this is a template parameter, +// this is not typechecked to be a subclass of Table, so beware! +// Note: this function will return false for fields equal to the default +// value, since they're not stored in the buffer (unless force_defaults was +// used). +template +bool IsFieldPresent(const T *table, typename T::FlatBuffersVTableOffset field) { + // Cast, since Table is a private baseclass of any table types. 
+ return reinterpret_cast(table)->CheckField( + static_cast(field)); +} + +// Utility function for reverse lookups on the EnumNames*() functions +// (in the generated C++ code) +// names must be NULL terminated. +inline int LookupEnum(const char **names, const char *name) { + for (const char **p = names; *p; p++) + if (!strcmp(*p, name)) return static_cast(p - names); + return -1; +} + +// These macros allow us to layout a struct with a guarantee that they'll end +// up looking the same on different compilers and platforms. +// It does this by disallowing the compiler to do any padding, and then +// does padding itself by inserting extra padding fields that make every +// element aligned to its own size. +// Additionally, it manually sets the alignment of the struct as a whole, +// which is typically its largest element, or a custom size set in the schema +// by the force_align attribute. +// These are used in the generated code only. + +// clang-format off +#if defined(_MSC_VER) + #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ + __pragma(pack(1)) \ + struct __declspec(align(alignment)) + #define FLATBUFFERS_STRUCT_END(name, size) \ + __pragma(pack()) \ + static_assert(sizeof(name) == size, "compiler breaks packing rules") +#elif defined(__GNUC__) || defined(__clang__) + #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ + _Pragma("pack(1)") \ + struct __attribute__((aligned(alignment))) + #define FLATBUFFERS_STRUCT_END(name, size) \ + _Pragma("pack()") \ + static_assert(sizeof(name) == size, "compiler breaks packing rules") +#else + #error Unknown compiler, please define structure alignment macros +#endif +// clang-format on + +// Minimal reflection via code generation. +// Besides full-fat reflection (see reflection.h) and parsing/printing by +// loading schemas (see idl.h), we can also have code generation for minimal +// reflection data which allows pretty-printing and other uses without needing +// a schema or a parser. 
+// Generate code with --reflect-types (types only) or --reflect-names (names +// also) to enable. +// See minireflect.h for utilities using this functionality. + +// These types are organized slightly differently as the ones in idl.h. +enum SequenceType { ST_TABLE, ST_STRUCT, ST_UNION, ST_ENUM }; + +// Scalars have the same order as in idl.h +// clang-format off +#define FLATBUFFERS_GEN_ELEMENTARY_TYPES(ET) \ + ET(ET_UTYPE) \ + ET(ET_BOOL) \ + ET(ET_CHAR) \ + ET(ET_UCHAR) \ + ET(ET_SHORT) \ + ET(ET_USHORT) \ + ET(ET_INT) \ + ET(ET_UINT) \ + ET(ET_LONG) \ + ET(ET_ULONG) \ + ET(ET_FLOAT) \ + ET(ET_DOUBLE) \ + ET(ET_STRING) \ + ET(ET_SEQUENCE) // See SequenceType. + +enum ElementaryType { + #define FLATBUFFERS_ET(E) E, + FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) + #undef FLATBUFFERS_ET +}; + +inline const char * const *ElementaryTypeNames() { + static const char * const names[] = { + #define FLATBUFFERS_ET(E) #E, + FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) + #undef FLATBUFFERS_ET + }; + return names; +} +// clang-format on + +// Basic type info cost just 16bits per field! +struct TypeCode { + uint16_t base_type : 4; // ElementaryType + uint16_t is_vector : 1; + int16_t sequence_ref : 11; // Index into type_refs below, or -1 for none. +}; + +static_assert(sizeof(TypeCode) == 2, "TypeCode"); + +struct TypeTable; + +// Signature of the static method present in each type. +typedef const TypeTable *(*TypeFunction)(); + +struct TypeTable { + SequenceType st; + size_t num_elems; // of type_codes, values, names (but not type_refs). + const TypeCode *type_codes; // num_elems count + const TypeFunction *type_refs; // less than num_elems entries (see TypeCode). + const int64_t *values; // Only set for non-consecutive enum/union or structs. + const char * const *names; // Only set if compiled with --reflect-names. +}; + +// String which identifies the current version of FlatBuffers. 
+// flatbuffer_version_string is used by Google developers to identify which +// applications uploaded to Google Play are using this library. This allows +// the development team at Google to determine the popularity of the library. +// How it works: Applications that are uploaded to the Google Play Store are +// scanned for this version string. We track which applications are using it +// to measure popularity. You are free to remove it (of course) but we would +// appreciate if you left it in. + +// Weak linkage is culled by VS & doesn't work on cygwin. +// clang-format off +#if !defined(_WIN32) && !defined(__CYGWIN__) + +extern volatile __attribute__((weak)) const char *flatbuffer_version_string; +volatile __attribute__((weak)) const char *flatbuffer_version_string = + "FlatBuffers " + FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "." + FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "." + FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION); + +#endif // !defined(_WIN32) && !defined(__CYGWIN__) + +#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\ + inline E operator | (E lhs, E rhs){\ + return E(T(lhs) | T(rhs));\ + }\ + inline E operator & (E lhs, E rhs){\ + return E(T(lhs) & T(rhs));\ + }\ + inline E operator ^ (E lhs, E rhs){\ + return E(T(lhs) ^ T(rhs));\ + }\ + inline E operator ~ (E lhs){\ + return E(~T(lhs));\ + }\ + inline E operator |= (E &lhs, E rhs){\ + lhs = lhs | rhs;\ + return lhs;\ + }\ + inline E operator &= (E &lhs, E rhs){\ + lhs = lhs & rhs;\ + return lhs;\ + }\ + inline E operator ^= (E &lhs, E rhs){\ + lhs = lhs ^ rhs;\ + return lhs;\ + }\ + inline bool operator !(E rhs) \ + {\ + return !bool(T(rhs)); \ + } +/// @endcond +} // namespace MindSpore.flatbuffers + +// clang-format on + +#endif // FLATBUFFERS_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/base.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/base.h new file mode 100644 index 
0000000000000000000000000000000000000000..53244aa30ba67294a908df70eb3ac4f5870f3efc --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/base.h @@ -0,0 +1,379 @@ +#ifndef FLATBUFFERS_BASE_H_ +#define FLATBUFFERS_BASE_H_ + +// clang-format off + +// If activate should be declared and included first. +#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \ + defined(_MSC_VER) && defined(_DEBUG) + // The _CRTDBG_MAP_ALLOC inside will replace + // calloc/free (etc) to its debug version using #define directives. + #define _CRTDBG_MAP_ALLOC + #include + #include + // Replace operator new by trace-enabled version. + #define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__) + #define new DEBUG_NEW +#endif + +#if !defined(FLATBUFFERS_ASSERT) +#include +#define FLATBUFFERS_ASSERT assert +#elif defined(FLATBUFFERS_ASSERT_INCLUDE) +// Include file with forward declaration +#include FLATBUFFERS_ASSERT_INCLUDE +#endif + +#ifndef ARDUINO +#include +#endif + +#include +#include +#include + +#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H) + #include +#else + #include +#endif + +#include +#include +#include +#include +#include +#include +#include + +#ifdef _STLPORT_VERSION + #define FLATBUFFERS_CPP98_STL +#endif +#ifndef FLATBUFFERS_CPP98_STL + #include +#endif + +#include "stl_emulation.h" + +// Note the __clang__ check is needed, because clang presents itself +// as an older GNUC compiler (4.2). +// Clang 3.3 and later implement all of the ISO C++ 2011 standard. +// Clang 3.4 and later implement all of the ISO C++ 2014 standard. +// http://clang.llvm.org/cxx_status.html + +// Note the MSVC value '__cplusplus' may be incorrect: +// The '__cplusplus' predefined macro in the MSVC stuck at the value 199711L, +// indicating (erroneously!) that the compiler conformed to the C++98 Standard. +// This value should be correct starting from MSVC2017-15.7-Preview-3. 
+// The '__cplusplus' will be valid only if MSVC2017-15.7-P3 and the `/Zc:__cplusplus` switch is set. +// Workaround (for details see MSDN): +// Use the _MSC_VER and _MSVC_LANG definition instead of the __cplusplus for compatibility. +// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch. + +#if defined(__GNUC__) && !defined(__clang__) + #define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#else + #define FLATBUFFERS_GCC 0 +#endif + +#if defined(__clang__) + #define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) +#else + #define FLATBUFFERS_CLANG 0 +#endif + +/// @cond FLATBUFFERS_INTERNAL +#if __cplusplus <= 199711L && \ + (!defined(_MSC_VER) || _MSC_VER < 1600) && \ + (!defined(__GNUC__) || \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400)) + #error A C++11 compatible compiler with support for the auto typing is \ + required for FlatBuffers. + #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__ +#endif + +#if !defined(__clang__) && \ + defined(__GNUC__) && \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600) + // Backwards compatability for g++ 4.4, and 4.5 which don't have the nullptr + // and constexpr keywords. Note the __clang__ check is needed, because clang + // presents itself as an older GNUC compiler. + #ifndef nullptr_t + const class nullptr_t { + public: + template inline operator T*() const { return 0; } + private: + void operator&() const; + } nullptr = {}; + #endif + #ifndef constexpr + #define constexpr const + #endif +#endif + +// The wire format uses a little endian encoding (since that's efficient for +// the common platforms). 
+#if defined(__s390x__) + #define FLATBUFFERS_LITTLEENDIAN 0 +#endif // __s390x__ +#if !defined(FLATBUFFERS_LITTLEENDIAN) + #if defined(__GNUC__) || defined(__clang__) + #if (defined(__BIG_ENDIAN__) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + #define FLATBUFFERS_LITTLEENDIAN 0 + #else + #define FLATBUFFERS_LITTLEENDIAN 1 + #endif // __BIG_ENDIAN__ + #elif defined(_MSC_VER) + #if defined(_M_PPC) + #define FLATBUFFERS_LITTLEENDIAN 0 + #else + #define FLATBUFFERS_LITTLEENDIAN 1 + #endif + #else + #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN. + #endif +#endif // !defined(FLATBUFFERS_LITTLEENDIAN) + +#define FLATBUFFERS_VERSION_MAJOR 1 +#define FLATBUFFERS_VERSION_MINOR 11 +#define FLATBUFFERS_VERSION_REVISION 0 +#define FLATBUFFERS_STRING_EXPAND(X) #X +#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X) + +#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \ + (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \ + defined(__clang__) + #define FLATBUFFERS_FINAL_CLASS final + #define FLATBUFFERS_OVERRIDE override + #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t +#else + #define FLATBUFFERS_FINAL_CLASS + #define FLATBUFFERS_OVERRIDE + #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE +#endif + +#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \ + (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \ + (defined(__cpp_constexpr) && __cpp_constexpr >= 200704) + #define FLATBUFFERS_CONSTEXPR constexpr +#else + #define FLATBUFFERS_CONSTEXPR const +#endif + +#if (defined(__cplusplus) && __cplusplus >= 201402L) || \ + (defined(__cpp_constexpr) && __cpp_constexpr >= 201304) + #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR +#else + #define FLATBUFFERS_CONSTEXPR_CPP14 +#endif + +#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \ + (defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \ + defined(__clang__) + #define 
FLATBUFFERS_NOEXCEPT noexcept +#else + #define FLATBUFFERS_NOEXCEPT +#endif + +// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to +// private, so be sure to put it at the end or reset access mode explicitly. +#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \ + (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \ + defined(__clang__) + #define FLATBUFFERS_DELETE_FUNC(func) func = delete; +#else + #define FLATBUFFERS_DELETE_FUNC(func) private: func; +#endif + +#ifndef FLATBUFFERS_HAS_STRING_VIEW + // Only provide flatbuffers::string_view if __has_include can be used + // to detect a header that provides an implementation + #if defined(__has_include) + // Check for std::string_view (in c++17) + #if __has_include() && (__cplusplus >= 201606 || _HAS_CXX17) + #include + namespace flatbuffers { + typedef std::string_view string_view; + } + #define FLATBUFFERS_HAS_STRING_VIEW 1 + // Check for std::experimental::string_view (in c++14, compiler-dependent) + #elif __has_include() && (__cplusplus >= 201411) + #include + namespace flatbuffers { + typedef std::experimental::string_view string_view; + } + #define FLATBUFFERS_HAS_STRING_VIEW 1 + #endif + #endif // __has_include +#endif // !FLATBUFFERS_HAS_STRING_VIEW + +#ifndef FLATBUFFERS_HAS_NEW_STRTOD + // Modern (C++11) strtod and strtof functions are available for use. + // 1) nan/inf strings as argument of strtod; + // 2) hex-float as argument of strtod/strtof. + #if (defined(_MSC_VER) && _MSC_VER >= 1900) || \ + (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \ + (defined(__clang__)) + #define FLATBUFFERS_HAS_NEW_STRTOD 1 + #endif +#endif // !FLATBUFFERS_HAS_NEW_STRTOD + +#ifndef FLATBUFFERS_LOCALE_INDEPENDENT + // Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}. + // They are part of the POSIX-2008 but not part of the C/C++ standard. + // GCC/Clang have definition (_XOPEN_SOURCE>=700) if POSIX-2008. 
+ #if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \ + (defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE>=700))) + #define FLATBUFFERS_LOCALE_INDEPENDENT 1 + #else + #define FLATBUFFERS_LOCALE_INDEPENDENT 0 + #endif +#endif // !FLATBUFFERS_LOCALE_INDEPENDENT + +// Suppress Undefined Behavior Sanitizer (recoverable only). Usage: +// - __supress_ubsan__("undefined") +// - __supress_ubsan__("signed-integer-overflow") +#if defined(__clang__) + #define __supress_ubsan__(type) __attribute__((no_sanitize(type))) +#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409) + #define __supress_ubsan__(type) __attribute__((no_sanitize_undefined)) +#else + #define __supress_ubsan__(type) +#endif + +// This is constexpr function used for checking compile-time constants. +// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`. +template FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) { + return !!t; +} + +// Enable C++ attribute [[]] if std:c++17 or higher. +#if ((__cplusplus >= 201703L) \ + || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))) + // All attributes unknown to an implementation are ignored without causing an error. + #define FLATBUFFERS_ATTRIBUTE(attr) [[attr]] + + #define FLATBUFFERS_FALLTHROUGH() [[fallthrough]] +#else + #define FLATBUFFERS_ATTRIBUTE(attr) + + #if FLATBUFFERS_CLANG >= 30800 + #define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]] + #elif FLATBUFFERS_GCC >= 70300 + #define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]] + #else + #define FLATBUFFERS_FALLTHROUGH() + #endif +#endif + +/// @endcond + +/// @file +namespace flatbuffers { + +/// @cond FLATBUFFERS_INTERNAL +// Our default offset / size type, 32bit on purpose on 64bit systems. +// Also, using a consistent offset type maintains compatibility of serialized +// offset values between 32bit and 64bit systems. +typedef uint32_t uoffset_t; + +// Signed offsets for references that can go in both directions. 
+typedef int32_t soffset_t; + +// Offset/index used in v-tables, can be changed to uint8_t in +// format forks to save a bit of space if desired. +typedef uint16_t voffset_t; + +typedef uintmax_t largest_scalar_t; + +// In 32bits, this evaluates to 2GB - 1 +#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(soffset_t) * 8 - 1)) - 1) + +// We support aligning the contents of buffers up to this size. +#define FLATBUFFERS_MAX_ALIGNMENT 16 + +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable: 4127) // C4127: conditional expression is constant +#endif + +template T EndianSwap(T t) { + #if defined(_MSC_VER) + #define FLATBUFFERS_BYTESWAP16 _byteswap_ushort + #define FLATBUFFERS_BYTESWAP32 _byteswap_ulong + #define FLATBUFFERS_BYTESWAP64 _byteswap_uint64 + #else + #if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__) + // __builtin_bswap16 was missing prior to GCC 4.8. + #define FLATBUFFERS_BYTESWAP16(x) \ + static_cast(__builtin_bswap32(static_cast(x) << 16)) + #else + #define FLATBUFFERS_BYTESWAP16 __builtin_bswap16 + #endif + #define FLATBUFFERS_BYTESWAP32 __builtin_bswap32 + #define FLATBUFFERS_BYTESWAP64 __builtin_bswap64 + #endif + if (sizeof(T) == 1) { // Compile-time if-then's. + return t; + } else if (sizeof(T) == 2) { + union { T t; uint16_t i; } u; + u.t = t; + u.i = FLATBUFFERS_BYTESWAP16(u.i); + return u.t; + } else if (sizeof(T) == 4) { + union { T t; uint32_t i; } u; + u.t = t; + u.i = FLATBUFFERS_BYTESWAP32(u.i); + return u.t; + } else if (sizeof(T) == 8) { + union { T t; uint64_t i; } u; + u.t = t; + u.i = FLATBUFFERS_BYTESWAP64(u.i); + return u.t; + } else { + FLATBUFFERS_ASSERT(0); + } +} + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +template T EndianScalar(T t) { + #if FLATBUFFERS_LITTLEENDIAN + return t; + #else + return EndianSwap(t); + #endif +} + +template +// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. 
+__supress_ubsan__("alignment") +T ReadScalar(const void *p) { + return EndianScalar(*reinterpret_cast(p)); +} + +template +// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. +__supress_ubsan__("alignment") +void WriteScalar(void *p, T t) { + *reinterpret_cast(p) = EndianScalar(t); +} + +template struct Offset; +template __supress_ubsan__("alignment") void WriteScalar(void *p, Offset t) { + *reinterpret_cast(p) = EndianScalar(t.o); +} + +// Computes how many bytes you'd have to pad to be able to write an +// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in +// memory). +inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) { + return ((~buf_size) + 1) & (scalar_size - 1); +} + +} // namespace flatbuffers +#endif // FLATBUFFERS_BASE_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/code_generators.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/code_generators.h new file mode 100644 index 0000000000000000000000000000000000000000..a3fd98c20bd5cb2f0b95fc92f02230130d505983 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/code_generators.h @@ -0,0 +1,203 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef FLATBUFFERS_CODE_GENERATORS_H_ +#define FLATBUFFERS_CODE_GENERATORS_H_ + +#include +#include +#include "flatbuffers/idl.h" + +namespace flatbuffers { + +// Utility class to assist in generating code through use of text templates. +// +// Example code: +// CodeWriter code; +// code.SetValue("NAME", "Foo"); +// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }"; +// code.SetValue("NAME", "Bar"); +// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }"; +// std::cout << code.ToString() << std::endl; +// +// Output: +// void Foo() { printf("%s", "Foo"); } +// void Bar() { printf("%s", "Bar"); } +class CodeWriter { + public: + CodeWriter() {} + + // Clears the current "written" code. + void Clear() { + stream_.str(""); + stream_.clear(); + } + + // Associates a key with a value. All subsequent calls to operator+=, where + // the specified key is contained in {{ and }} delimiters will be replaced by + // the given value. + void SetValue(const std::string &key, const std::string &value) { + value_map_[key] = value; + } + + // Appends the given text to the generated code as well as a newline + // character. Any text within {{ and }} delimiters is replaced by values + // previously stored in the CodeWriter by calling SetValue above. The newline + // will be suppressed if the text ends with the \\ character. + void operator+=(std::string text); + + // Returns the current contents of the CodeWriter as a std::string. 
+ std::string ToString() const { return stream_.str(); } + + private: + std::map value_map_; + std::stringstream stream_; +}; + +class BaseGenerator { + public: + virtual bool generate() = 0; + + static std::string NamespaceDir(const Parser &parser, const std::string &path, + const Namespace &ns); + + protected: + BaseGenerator(const Parser &parser, const std::string &path, + const std::string &file_name, + const std::string qualifying_start, + const std::string qualifying_separator) + : parser_(parser), + path_(path), + file_name_(file_name), + qualifying_start_(qualifying_start), + qualifying_separator_(qualifying_separator) {} + virtual ~BaseGenerator() {} + + // No copy/assign. + BaseGenerator &operator=(const BaseGenerator &); + BaseGenerator(const BaseGenerator &); + + std::string NamespaceDir(const Namespace &ns) const; + + static const char *FlatBuffersGeneratedWarning(); + + static std::string FullNamespace(const char *separator, const Namespace &ns); + + static std::string LastNamespacePart(const Namespace &ns); + + // tracks the current namespace for early exit in WrapInNameSpace + // c++, java and csharp returns a different namespace from + // the following default (no early exit, always fully qualify), + // which works for js and php + virtual const Namespace *CurrentNameSpace() const { return nullptr; } + + // Ensure that a type is prefixed with its namespace whenever it is used + // outside of its namespace. 
+ std::string WrapInNameSpace(const Namespace *ns, + const std::string &name) const; + + std::string WrapInNameSpace(const Definition &def) const; + + std::string GetNameSpace(const Definition &def) const; + + const Parser &parser_; + const std::string &path_; + const std::string &file_name_; + const std::string qualifying_start_; + const std::string qualifying_separator_; +}; + +struct CommentConfig { + const char *first_line; + const char *content_line_prefix; + const char *last_line; +}; + +extern void GenComment(const std::vector &dc, + std::string *code_ptr, const CommentConfig *config, + const char *prefix = ""); + +class FloatConstantGenerator { + public: + virtual ~FloatConstantGenerator() {} + std::string GenFloatConstant(const FieldDef &field) const; + + private: + virtual std::string Value(double v, const std::string &src) const = 0; + virtual std::string Inf(double v) const = 0; + virtual std::string NaN(double v) const = 0; + + virtual std::string Value(float v, const std::string &src) const = 0; + virtual std::string Inf(float v) const = 0; + virtual std::string NaN(float v) const = 0; + + template + std::string GenFloatConstantImpl(const FieldDef &field) const; +}; + +class SimpleFloatConstantGenerator : public FloatConstantGenerator { + public: + SimpleFloatConstantGenerator(const char *nan_number, + const char *pos_inf_number, + const char *neg_inf_number); + + private: + std::string Value(double v, + const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(double v) const FLATBUFFERS_OVERRIDE; + std::string NaN(double v) const FLATBUFFERS_OVERRIDE; + + std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(float v) const FLATBUFFERS_OVERRIDE; + std::string NaN(float v) const FLATBUFFERS_OVERRIDE; + + const std::string nan_number_; + const std::string pos_inf_number_; + const std::string neg_inf_number_; +}; + +// C++, C#, Java like generator. 
+class TypedFloatConstantGenerator : public FloatConstantGenerator { + public: + TypedFloatConstantGenerator(const char *double_prefix, + const char *single_prefix, const char *nan_number, + const char *pos_inf_number, + const char *neg_inf_number = ""); + + private: + std::string Value(double v, + const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(double v) const FLATBUFFERS_OVERRIDE; + + std::string NaN(double v) const FLATBUFFERS_OVERRIDE; + + std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Inf(float v) const FLATBUFFERS_OVERRIDE; + std::string NaN(float v) const FLATBUFFERS_OVERRIDE; + + std::string MakeNaN(const std::string &prefix) const; + std::string MakeInf(bool neg, const std::string &prefix) const; + + const std::string double_prefix_; + const std::string single_prefix_; + const std::string nan_number_; + const std::string pos_inf_number_; + const std::string neg_inf_number_; +}; + +} // namespace flatbuffers + +#endif // FLATBUFFERS_CODE_GENERATORS_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flatc.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flatc.h new file mode 100644 index 0000000000000000000000000000000000000000..728b70a0e41889ffa26ec19600efaf0d01959aba --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flatc.h @@ -0,0 +1,96 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "flatbuffers/flatbuffers.h" +#include "flatbuffers/idl.h" +#include "flatbuffers/util.h" + +#ifndef FLATC_H_ +# define FLATC_H_ + +namespace flatbuffers { + +class FlatCompiler { + public: + // Output generator for the various programming languages and formats we + // support. + struct Generator { + typedef bool (*GenerateFn)(const flatbuffers::Parser &parser, + const std::string &path, + const std::string &file_name); + typedef std::string (*MakeRuleFn)(const flatbuffers::Parser &parser, + const std::string &path, + const std::string &file_name); + + GenerateFn generate; + const char *generator_opt_short; + const char *generator_opt_long; + const char *lang_name; + bool schema_only; + GenerateFn generateGRPC; + flatbuffers::IDLOptions::Language lang; + const char *generator_help; + MakeRuleFn make_rule; + }; + + typedef void (*WarnFn)(const FlatCompiler *flatc, const std::string &warn, + bool show_exe_name); + + typedef void (*ErrorFn)(const FlatCompiler *flatc, const std::string &err, + bool usage, bool show_exe_name); + + // Parameters required to initialize the FlatCompiler. 
+ struct InitParams { + InitParams() + : generators(nullptr), + num_generators(0), + warn_fn(nullptr), + error_fn(nullptr) {} + + const Generator *generators; + size_t num_generators; + WarnFn warn_fn; + ErrorFn error_fn; + }; + + explicit FlatCompiler(const InitParams ¶ms) : params_(params) {} + + int Compile(int argc, const char **argv); + + std::string GetUsageString(const char *program_name) const; + + private: + void ParseFile(flatbuffers::Parser &parser, const std::string &filename, + const std::string &contents, + std::vector &include_directories) const; + + void LoadBinarySchema(Parser &parser, const std::string &filename, + const std::string &contents); + + void Warn(const std::string &warn, bool show_exe_name = true) const; + + void Error(const std::string &err, bool usage = true, + bool show_exe_name = true) const; + + InitParams params_; +}; + +} // namespace MindSpore.flatbuffers + +#endif // FLATC_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flexbuffers.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flexbuffers.h new file mode 100644 index 0000000000000000000000000000000000000000..d854915e23a52c6ddbca47d5a77ba691838c05ed --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/flexbuffers.h @@ -0,0 +1,1538 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef FLATBUFFERS_FLEXBUFFERS_H_ +#define FLATBUFFERS_FLEXBUFFERS_H_ + +#include +// Used to select STL variant. +#include "base.h" +// We use the basic binary writing functions from the regular FlatBuffers. +#include "flatbuffers/util.h" + +#ifdef _MSC_VER +# include +#endif + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4127) // C4127: conditional expression is constant +#endif + +namespace flexbuffers { + +class Reference; +class Map; + +// These are used in the lower 2 bits of a type field to determine the size of +// the elements (and or size field) of the item pointed to (e.g. vector). +enum BitWidth { + BIT_WIDTH_8 = 0, + BIT_WIDTH_16 = 1, + BIT_WIDTH_32 = 2, + BIT_WIDTH_64 = 3, +}; + +// These are used as the upper 6 bits of a type field to indicate the actual +// type. +enum Type { + FBT_NULL = 0, + FBT_INT = 1, + FBT_UINT = 2, + FBT_FLOAT = 3, + // Types above stored inline, types below store an offset. + FBT_KEY = 4, + FBT_STRING = 5, + FBT_INDIRECT_INT = 6, + FBT_INDIRECT_UINT = 7, + FBT_INDIRECT_FLOAT = 8, + FBT_MAP = 9, + FBT_VECTOR = 10, // Untyped. + FBT_VECTOR_INT = 11, // Typed any size (stores no type table). + FBT_VECTOR_UINT = 12, + FBT_VECTOR_FLOAT = 13, + FBT_VECTOR_KEY = 14, + FBT_VECTOR_STRING = 15, + FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field). + FBT_VECTOR_UINT2 = 17, + FBT_VECTOR_FLOAT2 = 18, + FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field). + FBT_VECTOR_UINT3 = 20, + FBT_VECTOR_FLOAT3 = 21, + FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field). 
+ FBT_VECTOR_UINT4 = 23, + FBT_VECTOR_FLOAT4 = 24, + FBT_BLOB = 25, + FBT_BOOL = 26, + FBT_VECTOR_BOOL = + 36, // To Allow the same type of conversion of type to vector type +}; + +inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; } + +inline bool IsTypedVectorElementType(Type t) { + return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL; +} + +inline bool IsTypedVector(Type t) { + return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING) || + t == FBT_VECTOR_BOOL; +} + +inline bool IsFixedTypedVector(Type t) { + return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; +} + +inline Type ToTypedVector(Type t, size_t fixed_len = 0) { + FLATBUFFERS_ASSERT(IsTypedVectorElementType(t)); + switch (fixed_len) { + case 0: return static_cast(t - FBT_INT + FBT_VECTOR_INT); + case 2: return static_cast(t - FBT_INT + FBT_VECTOR_INT2); + case 3: return static_cast(t - FBT_INT + FBT_VECTOR_INT3); + case 4: return static_cast(t - FBT_INT + FBT_VECTOR_INT4); + default: FLATBUFFERS_ASSERT(0); return FBT_NULL; + } +} + +inline Type ToTypedVectorElementType(Type t) { + FLATBUFFERS_ASSERT(IsTypedVector(t)); + return static_cast(t - FBT_VECTOR_INT + FBT_INT); +} + +inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) { + FLATBUFFERS_ASSERT(IsFixedTypedVector(t)); + auto fixed_type = t - FBT_VECTOR_INT2; + *len = static_cast(fixed_type / 3 + + 2); // 3 types each, starting from length 2. + return static_cast(fixed_type % 3 + FBT_INT); +} + +// TODO: implement proper support for 8/16bit floats, or decide not to +// support them. +typedef int16_t half; +typedef int8_t quarter; + +// TODO: can we do this without conditionals using intrinsics or inline asm +// on some platforms? Given branch prediction the method below should be +// decently quick, but it is the most frequently executed function. +// We could do an (unaligned) 64-bit read if we ifdef out the platforms for +// which that doesn't work (or where we'd read into un-owned memory). 
+template +R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) { + return byte_width < 4 + ? (byte_width < 2 + ? static_cast(flatbuffers::ReadScalar(data)) + : static_cast(flatbuffers::ReadScalar(data))) + : (byte_width < 8 + ? static_cast(flatbuffers::ReadScalar(data)) + : static_cast(flatbuffers::ReadScalar(data))); +} + +inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) { + return ReadSizedScalar( + data, byte_width); +} + +inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) { + // This is the "hottest" function (all offset lookups use this), so worth + // optimizing if possible. + // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a + // constant, which here it isn't. Test if memcpy is still faster than + // the conditionals in ReadSizedScalar. Can also use inline asm. + // clang-format off + #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86) + uint64_t u = 0; + __movsb(reinterpret_cast(&u), + reinterpret_cast(data), byte_width); + return MindSpore.flatbuffers::EndianScalar(u); + #else + return ReadSizedScalar( + data, byte_width); + #endif + // clang-format on +} + +inline double ReadDouble(const uint8_t *data, uint8_t byte_width) { + return ReadSizedScalar(data, + byte_width); +} + +inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) { + return offset - ReadUInt64(offset, byte_width); +} + +template const uint8_t *Indirect(const uint8_t *offset) { + return offset - flatbuffers::ReadScalar(offset); +} + +inline BitWidth WidthU(uint64_t u) { +#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \ + { \ + if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \ + } + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8); + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16); + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32); +#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH + return BIT_WIDTH_64; +} + +inline BitWidth WidthI(int64_t i) { + auto u = static_cast(i) << 1; + return WidthU(i >= 0 ? 
u : ~u); +} + +inline BitWidth WidthF(double f) { + return static_cast(static_cast(f)) == f ? BIT_WIDTH_32 + : BIT_WIDTH_64; +} + +// Base class of all types below. +// Points into the data buffer and allows access to one type. +class Object { + public: + Object(const uint8_t *data, uint8_t byte_width) + : data_(data), byte_width_(byte_width) {} + + protected: + const uint8_t *data_; + uint8_t byte_width_; +}; + +// Stores size in `byte_width_` bytes before data_ pointer. +class Sized : public Object { + public: + Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width) {} + size_t size() const { + return static_cast(ReadUInt64(data_ - byte_width_, byte_width_)); + } +}; + +class String : public Sized { + public: + String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} + + size_t length() const { return size(); } + const char *c_str() const { return reinterpret_cast(data_); } + std::string str() const { return std::string(c_str(), length()); } + + static String EmptyString() { + static const uint8_t empty_string[] = { 0 /*len*/, 0 /*terminator*/ }; + return String(empty_string + 1, 1); + } + bool IsTheEmptyString() const { return data_ == EmptyString().data_; } +}; + +class Blob : public Sized { + public: + Blob(const uint8_t *data_buf, uint8_t byte_width) + : Sized(data_buf, byte_width) {} + + static Blob EmptyBlob() { + static const uint8_t empty_blob[] = { 0 /*len*/ }; + return Blob(empty_blob + 1, 1); + } + bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; } + const uint8_t *data() const { return data_; } +}; + +class Vector : public Sized { + public: + Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} + + Reference operator[](size_t i) const; + + static Vector EmptyVector() { + static const uint8_t empty_vector[] = { 0 /*len*/ }; + return Vector(empty_vector + 1, 1); + } + bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; } +}; + +class TypedVector : public 
Sized { + public: + TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type) + : Sized(data, byte_width), type_(element_type) {} + + Reference operator[](size_t i) const; + + static TypedVector EmptyTypedVector() { + static const uint8_t empty_typed_vector[] = { 0 /*len*/ }; + return TypedVector(empty_typed_vector + 1, 1, FBT_INT); + } + bool IsTheEmptyVector() const { + return data_ == TypedVector::EmptyTypedVector().data_; + } + + Type ElementType() { return type_; } + + private: + Type type_; + + friend Map; +}; + +class FixedTypedVector : public Object { + public: + FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, + uint8_t len) + : Object(data, byte_width), type_(element_type), len_(len) {} + + Reference operator[](size_t i) const; + + static FixedTypedVector EmptyFixedTypedVector() { + static const uint8_t fixed_empty_vector[] = { 0 /* unused */ }; + return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0); + } + bool IsTheEmptyFixedTypedVector() const { + return data_ == FixedTypedVector::EmptyFixedTypedVector().data_; + } + + Type ElementType() { return type_; } + uint8_t size() { return len_; } + + private: + Type type_; + uint8_t len_; +}; + +class Map : public Vector { + public: + Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {} + + Reference operator[](const char *key) const; + Reference operator[](const std::string &key) const; + + Vector Values() const { return Vector(data_, byte_width_); } + + TypedVector Keys() const { + const size_t num_prefixed_fields = 3; + auto keys_offset = data_ - byte_width_ * num_prefixed_fields; + return TypedVector(Indirect(keys_offset, byte_width_), + static_cast( + ReadUInt64(keys_offset + byte_width_, byte_width_)), + FBT_KEY); + } + + static Map EmptyMap() { + static const uint8_t empty_map[] = { + 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/ + }; + return Map(empty_map + 4, 1); + } + + bool IsTheEmptyMap() const { return data_ == 
EmptyMap().data_; } +}; + +template +void AppendToString(std::string &s, T &&v, bool keys_quoted) { + s += "[ "; + for (size_t i = 0; i < v.size(); i++) { + if (i) s += ", "; + v[i].ToString(true, keys_quoted, s); + } + s += " ]"; +} + +class Reference { + public: + Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, + Type type) + : data_(data), + parent_width_(parent_width), + byte_width_(byte_width), + type_(type) {} + + Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type) + : data_(data), parent_width_(parent_width) { + byte_width_ = 1U << static_cast(packed_type & 3); + type_ = static_cast(packed_type >> 2); + } + + Type GetType() const { return type_; } + + bool IsNull() const { return type_ == FBT_NULL; } + bool IsBool() const { return type_ == FBT_BOOL; } + bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; } + bool IsUInt() const { + return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; + } + bool IsIntOrUint() const { return IsInt() || IsUInt(); } + bool IsFloat() const { + return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; + } + bool IsNumeric() const { return IsIntOrUint() || IsFloat(); } + bool IsString() const { return type_ == FBT_STRING; } + bool IsKey() const { return type_ == FBT_KEY; } + bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; } + bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); } + bool IsFixedTypedVector() const { return flexbuffers::IsFixedTypedVector(type_); } + bool IsAnyVector() const { return (IsTypedVector() || IsFixedTypedVector() || IsVector());} + bool IsMap() const { return type_ == FBT_MAP; } + bool IsBlob() const { return type_ == FBT_BLOB; } + + bool AsBool() const { + return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) + : AsUInt64()) != 0; + } + + // Reads any type as a int64_t. Never fails, does most sensible conversion. 
+ // Truncates floats, strings are attempted to be parsed for a number, + // vectors/maps return their size. Returns 0 if all else fails. + int64_t AsInt64() const { + if (type_ == FBT_INT) { + // A fast path for the common case. + return ReadInt64(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); + case FBT_UINT: return ReadUInt64(data_, parent_width_); + case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); + case FBT_FLOAT: + return static_cast(ReadDouble(data_, parent_width_)); + case FBT_INDIRECT_FLOAT: + return static_cast(ReadDouble(Indirect(), byte_width_)); + case FBT_NULL: return 0; + case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str()); + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: return ReadInt64(data_, parent_width_); + default: + // Convert other things to int. + return 0; + } + } + + // TODO: could specialize these to not use AsInt64() if that saves + // extension ops in generated code, and use a faster op than ReadInt64. + int32_t AsInt32() const { return static_cast(AsInt64()); } + int16_t AsInt16() const { return static_cast(AsInt64()); } + int8_t AsInt8() const { return static_cast(AsInt64()); } + + uint64_t AsUInt64() const { + if (type_ == FBT_UINT) { + // A fast path for the common case. 
+ return ReadUInt64(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); + case FBT_INT: return ReadInt64(data_, parent_width_); + case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); + case FBT_FLOAT: + return static_cast(ReadDouble(data_, parent_width_)); + case FBT_INDIRECT_FLOAT: + return static_cast(ReadDouble(Indirect(), byte_width_)); + case FBT_NULL: return 0; + case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str()); + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: return ReadUInt64(data_, parent_width_); + default: + // Convert other things to uint. + return 0; + } + } + + uint32_t AsUInt32() const { return static_cast(AsUInt64()); } + uint16_t AsUInt16() const { return static_cast(AsUInt64()); } + uint8_t AsUInt8() const { return static_cast(AsUInt64()); } + + double AsDouble() const { + if (type_ == FBT_FLOAT) { + // A fast path for the common case. + return ReadDouble(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_); + case FBT_INT: + return static_cast(ReadInt64(data_, parent_width_)); + case FBT_UINT: + return static_cast(ReadUInt64(data_, parent_width_)); + case FBT_INDIRECT_INT: + return static_cast(ReadInt64(Indirect(), byte_width_)); + case FBT_INDIRECT_UINT: + return static_cast(ReadUInt64(Indirect(), byte_width_)); + case FBT_NULL: return 0.0; + case FBT_STRING: return strtod(AsString().c_str(), nullptr); + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: + return static_cast(ReadUInt64(data_, parent_width_)); + default: + // Convert strings and other things to float. 
+ return 0; + } + } + + float AsFloat() const { return static_cast(AsDouble()); } + + const char *AsKey() const { + if (type_ == FBT_KEY) { + return reinterpret_cast(Indirect()); + } else { + return ""; + } + } + + // This function returns the empty string if you try to read a not-string. + String AsString() const { + if (type_ == FBT_STRING) { + return String(Indirect(), byte_width_); + } else { + return String::EmptyString(); + } + } + + // Unlike AsString(), this will convert any type to a std::string. + std::string ToString() const { + std::string s; + ToString(false, false, s); + return s; + } + + // Convert any type to a JSON-like string. strings_quoted determines if + // string values at the top level receive "" quotes (inside other values + // they always do). keys_quoted determines if keys are quoted, at any level. + // TODO(wvo): add further options to have indentation/newlines. + void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const { + if (type_ == FBT_STRING) { + String str(Indirect(), byte_width_); + if (strings_quoted) { + flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false); + } else { + s.append(str.c_str(), str.length()); + } + } else if (IsKey()) { + auto str = AsKey(); + if (keys_quoted) { + flatbuffers::EscapeString(str, strlen(str), &s, true, false); + } else { + s += str; + } + } else if (IsInt()) { + s += flatbuffers::NumToString(AsInt64()); + } else if (IsUInt()) { + s += flatbuffers::NumToString(AsUInt64()); + } else if (IsFloat()) { + s += flatbuffers::NumToString(AsDouble()); + } else if (IsNull()) { + s += "null"; + } else if (IsBool()) { + s += AsBool() ? 
"true" : "false"; + } else if (IsMap()) { + s += "{ "; + auto m = AsMap(); + auto keys = m.Keys(); + auto vals = m.Values(); + for (size_t i = 0; i < keys.size(); i++) { + keys[i].ToString(true, keys_quoted, s); + s += ": "; + vals[i].ToString(true, keys_quoted, s); + if (i < keys.size() - 1) s += ", "; + } + s += " }"; + } else if (IsVector()) { + AppendToString(s, AsVector(), keys_quoted); + } else if (IsTypedVector()) { + AppendToString(s, AsTypedVector(), keys_quoted); + } else if (IsFixedTypedVector()) { + AppendToString(s, AsFixedTypedVector(), keys_quoted); + } else if (IsBlob()) { + auto blob = AsBlob(); + flatbuffers::EscapeString(reinterpret_cast(blob.data()), blob.size(), &s, true, false); + } else { + s += "(?)"; + } + } + + // This function returns the empty blob if you try to read a not-blob. + // Strings can be viewed as blobs too. + Blob AsBlob() const { + if (type_ == FBT_BLOB || type_ == FBT_STRING) { + return Blob(Indirect(), byte_width_); + } else { + return Blob::EmptyBlob(); + } + } + + // This function returns the empty vector if you try to read a not-vector. + // Maps can be viewed as vectors too. 
+ Vector AsVector() const { + if (type_ == FBT_VECTOR || type_ == FBT_MAP) { + return Vector(Indirect(), byte_width_); + } else { + return Vector::EmptyVector(); + } + } + + TypedVector AsTypedVector() const { + if (IsTypedVector()) { + return TypedVector(Indirect(), byte_width_, + ToTypedVectorElementType(type_)); + } else { + return TypedVector::EmptyTypedVector(); + } + } + + FixedTypedVector AsFixedTypedVector() const { + if (IsFixedTypedVector()) { + uint8_t len = 0; + auto vtype = ToFixedTypedVectorElementType(type_, &len); + return FixedTypedVector(Indirect(), byte_width_, vtype, len); + } else { + return FixedTypedVector::EmptyFixedTypedVector(); + } + } + + Map AsMap() const { + if (type_ == FBT_MAP) { + return Map(Indirect(), byte_width_); + } else { + return Map::EmptyMap(); + } + } + + template T As() const; + + // Experimental: Mutation functions. + // These allow scalars in an already created buffer to be updated in-place. + // Since by default scalars are stored in the smallest possible space, + // the new value may not fit, in which case these functions return false. + // To avoid this, you can construct the values you intend to mutate using + // Builder::ForceMinimumBitWidth. 
+ bool MutateInt(int64_t i) { + if (type_ == FBT_INT) { + return Mutate(data_, i, parent_width_, WidthI(i)); + } else if (type_ == FBT_INDIRECT_INT) { + return Mutate(Indirect(), i, byte_width_, WidthI(i)); + } else if (type_ == FBT_UINT) { + auto u = static_cast(i); + return Mutate(data_, u, parent_width_, WidthU(u)); + } else if (type_ == FBT_INDIRECT_UINT) { + auto u = static_cast(i); + return Mutate(Indirect(), u, byte_width_, WidthU(u)); + } else { + return false; + } + } + + bool MutateBool(bool b) { + return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8); + } + + bool MutateUInt(uint64_t u) { + if (type_ == FBT_UINT) { + return Mutate(data_, u, parent_width_, WidthU(u)); + } else if (type_ == FBT_INDIRECT_UINT) { + return Mutate(Indirect(), u, byte_width_, WidthU(u)); + } else if (type_ == FBT_INT) { + auto i = static_cast(u); + return Mutate(data_, i, parent_width_, WidthI(i)); + } else if (type_ == FBT_INDIRECT_INT) { + auto i = static_cast(u); + return Mutate(Indirect(), i, byte_width_, WidthI(i)); + } else { + return false; + } + } + + bool MutateFloat(float f) { + if (type_ == FBT_FLOAT) { + return MutateF(data_, f, parent_width_, BIT_WIDTH_32); + } else if (type_ == FBT_INDIRECT_FLOAT) { + return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32); + } else { + return false; + } + } + + bool MutateFloat(double d) { + if (type_ == FBT_FLOAT) { + return MutateF(data_, d, parent_width_, WidthF(d)); + } else if (type_ == FBT_INDIRECT_FLOAT) { + return MutateF(Indirect(), d, byte_width_, WidthF(d)); + } else { + return false; + } + } + + bool MutateString(const char *str, size_t len) { + auto s = AsString(); + if (s.IsTheEmptyString()) return false; + // This is very strict, could allow shorter strings, but that creates + // garbage. 
+ if (s.length() != len) return false; + memcpy(const_cast(s.c_str()), str, len); + return true; + } + bool MutateString(const char *str) { return MutateString(str, strlen(str)); } + bool MutateString(const std::string &str) { + return MutateString(str.data(), str.length()); + } + + private: + const uint8_t *Indirect() const { + return flexbuffers::Indirect(data_, parent_width_); + } + + template + bool Mutate(const uint8_t *dest, T t, size_t byte_width, + BitWidth value_width) { + auto fits = static_cast(static_cast(1U) << value_width) <= + byte_width; + if (fits) { + t = flatbuffers::EndianScalar(t); + memcpy(const_cast(dest), &t, byte_width); + } + return fits; + } + + template + bool MutateF(const uint8_t *dest, T t, size_t byte_width, + BitWidth value_width) { + if (byte_width == sizeof(double)) + return Mutate(dest, static_cast(t), byte_width, value_width); + if (byte_width == sizeof(float)) + return Mutate(dest, static_cast(t), byte_width, value_width); + FLATBUFFERS_ASSERT(false); + return false; + } + + const uint8_t *data_; + uint8_t parent_width_; + uint8_t byte_width_; + Type type_; +}; + +// Template specialization for As(). 
+template<> inline bool Reference::As() const { return AsBool(); } + +template<> inline int8_t Reference::As() const { return AsInt8(); } +template<> inline int16_t Reference::As() const { return AsInt16(); } +template<> inline int32_t Reference::As() const { return AsInt32(); } +template<> inline int64_t Reference::As() const { return AsInt64(); } + +template<> inline uint8_t Reference::As() const { return AsUInt8(); } +template<> inline uint16_t Reference::As() const { return AsUInt16(); } +template<> inline uint32_t Reference::As() const { return AsUInt32(); } +template<> inline uint64_t Reference::As() const { return AsUInt64(); } + +template<> inline double Reference::As() const { return AsDouble(); } +template<> inline float Reference::As() const { return AsFloat(); } + +template<> inline String Reference::As() const { return AsString(); } +template<> inline std::string Reference::As() const { + return AsString().str(); +} + +template<> inline Blob Reference::As() const { return AsBlob(); } +template<> inline Vector Reference::As() const { return AsVector(); } +template<> inline TypedVector Reference::As() const { + return AsTypedVector(); +} +template<> inline FixedTypedVector Reference::As() const { + return AsFixedTypedVector(); +} +template<> inline Map Reference::As() const { return AsMap(); } + +inline uint8_t PackedType(BitWidth bit_width, Type type) { + return static_cast(bit_width | (type << 2)); +} + +inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); } + +// Vector accessors. +// Note: if you try to access outside of bounds, you get a Null value back +// instead. Normally this would be an assert, but since this is "dynamically +// typed" data, you may not want that (someone sends you a 2d vector and you +// wanted 3d). +// The Null converts seamlessly into a default value for any other type. +// TODO(wvo): Could introduce an #ifdef that makes this into an assert? 
+inline Reference Vector::operator[](size_t i) const { + auto len = size(); + if (i >= len) return Reference(nullptr, 1, NullPackedType()); + auto packed_type = (data_ + len * byte_width_)[i]; + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, packed_type); +} + +inline Reference TypedVector::operator[](size_t i) const { + auto len = size(); + if (i >= len) return Reference(nullptr, 1, NullPackedType()); + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, 1, type_); +} + +inline Reference FixedTypedVector::operator[](size_t i) const { + if (i >= len_) return Reference(nullptr, 1, NullPackedType()); + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, 1, type_); +} + +template int KeyCompare(const void *key, const void *elem) { + auto str_elem = reinterpret_cast( + Indirect(reinterpret_cast(elem))); + auto skey = reinterpret_cast(key); + return strcmp(skey, str_elem); +} + +inline Reference Map::operator[](const char *key) const { + auto keys = Keys(); + // We can't pass keys.byte_width_ to the comparison function, so we have + // to pick the right one ahead of time. + int (*comp)(const void *, const void *) = nullptr; + switch (keys.byte_width_) { + case 1: comp = KeyCompare; break; + case 2: comp = KeyCompare; break; + case 4: comp = KeyCompare; break; + case 8: comp = KeyCompare; break; + } + auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp); + if (!res) return Reference(nullptr, 1, NullPackedType()); + auto i = (reinterpret_cast(res) - keys.data_) / keys.byte_width_; + return (*static_cast(this))[i]; +} + +inline Reference Map::operator[](const std::string &key) const { + return (*this)[key.c_str()]; +} + +inline Reference GetRoot(const uint8_t *buffer, size_t size) { + // See Finish() below for the serialization counterpart of this. + // The root starts at the end of the buffer, so we parse backwards from there. 
+ auto end = buffer + size; + auto byte_width = *--end; + auto packed_type = *--end; + end -= byte_width; // The root data item. + return Reference(end, byte_width, packed_type); +} + +inline Reference GetRoot(const std::vector &buffer) { + return GetRoot(flatbuffers::vector_data(buffer), buffer.size()); +} + +// Flags that configure how the Builder behaves. +// The "Share" flags determine if the Builder automatically tries to pool +// this type. Pooling can reduce the size of serialized data if there are +// multiple maps of the same kind, at the expense of slightly slower +// serialization (the cost of lookups) and more memory use (std::set). +// By default this is on for keys, but off for strings. +// Turn keys off if you have e.g. only one map. +// Turn strings on if you expect many non-unique string values. +// Additionally, sharing key vectors can save space if you have maps with +// identical field populations. +enum BuilderFlag { + BUILDER_FLAG_NONE = 0, + BUILDER_FLAG_SHARE_KEYS = 1, + BUILDER_FLAG_SHARE_STRINGS = 2, + BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3, + BUILDER_FLAG_SHARE_KEY_VECTORS = 4, + BUILDER_FLAG_SHARE_ALL = 7, +}; + +class Builder FLATBUFFERS_FINAL_CLASS { + public: + Builder(size_t initial_size = 256, + BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS) + : buf_(initial_size), + finished_(false), + flags_(flags), + force_min_bit_width_(BIT_WIDTH_8), + key_pool(KeyOffsetCompare(buf_)), + string_pool(StringOffsetCompare(buf_)) { + buf_.clear(); + } + + /// @brief Get the serialized buffer (after you call `Finish()`). + /// @return Returns a vector owned by this class. + const std::vector &GetBuffer() const { + Finished(); + return buf_; + } + + // Size of the buffer. Does not include unfinished values. + size_t GetSize() const { return buf_.size(); } + + // Reset all state so we can re-use the buffer. 
+ void Clear() { + buf_.clear(); + stack_.clear(); + finished_ = false; + // flags_ remains as-is; + force_min_bit_width_ = BIT_WIDTH_8; + key_pool.clear(); + string_pool.clear(); + } + + // All value constructing functions below have two versions: one that + // takes a key (for placement inside a map) and one that doesn't (for inside + // vectors and elsewhere). + + void Null() { stack_.push_back(Value()); } + void Null(const char *key) { + Key(key); + Null(); + } + + void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); } + void Int(const char *key, int64_t i) { + Key(key); + Int(i); + } + + void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); } + void UInt(const char *key, uint64_t u) { + Key(key); + UInt(u); + } + + void Float(float f) { stack_.push_back(Value(f)); } + void Float(const char *key, float f) { + Key(key); + Float(f); + } + + void Double(double f) { stack_.push_back(Value(f)); } + void Double(const char *key, double d) { + Key(key); + Double(d); + } + + void Bool(bool b) { stack_.push_back(Value(b)); } + void Bool(const char *key, bool b) { + Key(key); + Bool(b); + } + + void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); } + void IndirectInt(const char *key, int64_t i) { + Key(key); + IndirectInt(i); + } + + void IndirectUInt(uint64_t u) { + PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); + } + void IndirectUInt(const char *key, uint64_t u) { + Key(key); + IndirectUInt(u); + } + + void IndirectFloat(float f) { + PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); + } + void IndirectFloat(const char *key, float f) { + Key(key); + IndirectFloat(f); + } + + void IndirectDouble(double f) { + PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); + } + void IndirectDouble(const char *key, double d) { + Key(key); + IndirectDouble(d); + } + + size_t Key(const char *str, size_t len) { + auto sloc = buf_.size(); + WriteBytes(str, len + 1); + if (flags_ & BUILDER_FLAG_SHARE_KEYS) { + auto it = 
key_pool.find(sloc); + if (it != key_pool.end()) { + // Already in the buffer. Remove key we just serialized, and use + // existing offset instead. + buf_.resize(sloc); + sloc = *it; + } else { + key_pool.insert(sloc); + } + } + stack_.push_back(Value(static_cast(sloc), FBT_KEY, BIT_WIDTH_8)); + return sloc; + } + + size_t Key(const char *str) { return Key(str, strlen(str)); } + size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); } + + size_t String(const char *str, size_t len) { + auto reset_to = buf_.size(); + auto sloc = CreateBlob(str, len, 1, FBT_STRING); + if (flags_ & BUILDER_FLAG_SHARE_STRINGS) { + StringOffset so(sloc, len); + auto it = string_pool.find(so); + if (it != string_pool.end()) { + // Already in the buffer. Remove string we just serialized, and use + // existing offset instead. + buf_.resize(reset_to); + sloc = it->first; + stack_.back().u_ = sloc; + } else { + string_pool.insert(so); + } + } + return sloc; + } + size_t String(const char *str) { return String(str, strlen(str)); } + size_t String(const std::string &str) { + return String(str.c_str(), str.size()); + } + void String(const flexbuffers::String &str) { + String(str.c_str(), str.length()); + } + + void String(const char *key, const char *str) { + Key(key); + String(str); + } + void String(const char *key, const std::string &str) { + Key(key); + String(str); + } + void String(const char *key, const flexbuffers::String &str) { + Key(key); + String(str); + } + + size_t Blob(const void *data, size_t len) { + return CreateBlob(data, len, 0, FBT_BLOB); + } + size_t Blob(const std::vector &v) { + return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB); + } + + // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String), + // e.g. Vector etc. Also in overloaded versions. + // Also some FlatBuffers types? 
+ + size_t StartVector() { return stack_.size(); } + size_t StartVector(const char *key) { + Key(key); + return stack_.size(); + } + size_t StartMap() { return stack_.size(); } + size_t StartMap(const char *key) { + Key(key); + return stack_.size(); + } + + // TODO(wvo): allow this to specify an aligment greater than the natural + // alignment. + size_t EndVector(size_t start, bool typed, bool fixed) { + auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed); + // Remove temp elements and return vector. + stack_.resize(start); + stack_.push_back(vec); + return static_cast(vec.u_); + } + + size_t EndMap(size_t start) { + // We should have interleaved keys and values on the stack. + // Make sure it is an even number: + auto len = stack_.size() - start; + FLATBUFFERS_ASSERT(!(len & 1)); + len /= 2; + // Make sure keys are all strings: + for (auto key = start; key < stack_.size(); key += 2) { + FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY); + } + // Now sort values, so later we can do a binary seach lookup. + // We want to sort 2 array elements at a time. + struct TwoValue { + Value key; + Value val; + }; + // TODO(wvo): strict aliasing? + // TODO(wvo): allow the caller to indicate the data is already sorted + // for maximum efficiency? With an assert to check sortedness to make sure + // we're not breaking binary search. + // Or, we can track if the map is sorted as keys are added which would be + // be quite cheap (cheaper than checking it here), so we can skip this + // step automatically when appliccable, and encourage people to write in + // sorted fashion. + // std::sort is typically already a lot faster on sorted data though. 
+ auto dict = + reinterpret_cast(flatbuffers::vector_data(stack_) + start); + std::sort(dict, dict + len, + [&](const TwoValue &a, const TwoValue &b) -> bool { + auto as = reinterpret_cast( + flatbuffers::vector_data(buf_) + a.key.u_); + auto bs = reinterpret_cast( + flatbuffers::vector_data(buf_) + b.key.u_); + auto comp = strcmp(as, bs); + // If this assertion hits, you've added two keys with the same + // value to this map. + // TODO: Have to check for pointer equality, as some sort + // implementation apparently call this function with the same + // element?? Why? + FLATBUFFERS_ASSERT(comp || &a == &b); + return comp < 0; + }); + // First create a vector out of all keys. + // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share + // the first vector. + auto keys = CreateVector(start, len, 2, true, false); + auto vec = CreateVector(start + 1, len, 2, false, false, &keys); + // Remove temp elements and return map. + stack_.resize(start); + stack_.push_back(vec); + return static_cast(vec.u_); + } + + template size_t Vector(F f) { + auto start = StartVector(); + f(); + return EndVector(start, false, false); + } + template size_t Vector(F f, T &state) { + auto start = StartVector(); + f(state); + return EndVector(start, false, false); + } + template size_t Vector(const char *key, F f) { + auto start = StartVector(key); + f(); + return EndVector(start, false, false); + } + template + size_t Vector(const char *key, F f, T &state) { + auto start = StartVector(key); + f(state); + return EndVector(start, false, false); + } + + template void Vector(const T *elems, size_t len) { + if (flatbuffers::is_scalar::value) { + // This path should be a lot quicker and use less space. 
+ ScalarVector(elems, len, false); + } else { + auto start = StartVector(); + for (size_t i = 0; i < len; i++) Add(elems[i]); + EndVector(start, false, false); + } + } + template + void Vector(const char *key, const T *elems, size_t len) { + Key(key); + Vector(elems, len); + } + template void Vector(const std::vector &vec) { + Vector(flatbuffers::vector_data(vec), vec.size()); + } + + template size_t TypedVector(F f) { + auto start = StartVector(); + f(); + return EndVector(start, true, false); + } + template size_t TypedVector(F f, T &state) { + auto start = StartVector(); + f(state); + return EndVector(start, true, false); + } + template size_t TypedVector(const char *key, F f) { + auto start = StartVector(key); + f(); + return EndVector(start, true, false); + } + template + size_t TypedVector(const char *key, F f, T &state) { + auto start = StartVector(key); + f(state); + return EndVector(start, true, false); + } + + template size_t FixedTypedVector(const T *elems, size_t len) { + // We only support a few fixed vector lengths. Anything bigger use a + // regular typed vector. + FLATBUFFERS_ASSERT(len >= 2 && len <= 4); + // And only scalar values. 
+ static_assert(flatbuffers::is_scalar::value, "Unrelated types"); + return ScalarVector(elems, len, true); + } + + template + size_t FixedTypedVector(const char *key, const T *elems, size_t len) { + Key(key); + return FixedTypedVector(elems, len); + } + + template size_t Map(F f) { + auto start = StartMap(); + f(); + return EndMap(start); + } + template size_t Map(F f, T &state) { + auto start = StartMap(); + f(state); + return EndMap(start); + } + template size_t Map(const char *key, F f) { + auto start = StartMap(key); + f(); + return EndMap(start); + } + template size_t Map(const char *key, F f, T &state) { + auto start = StartMap(key); + f(state); + return EndMap(start); + } + template void Map(const std::map &map) { + auto start = StartMap(); + for (auto it = map.begin(); it != map.end(); ++it) + Add(it->first.c_str(), it->second); + EndMap(start); + } + + // Overloaded Add that tries to call the correct function above. + void Add(int8_t i) { Int(i); } + void Add(int16_t i) { Int(i); } + void Add(int32_t i) { Int(i); } + void Add(int64_t i) { Int(i); } + void Add(uint8_t u) { UInt(u); } + void Add(uint16_t u) { UInt(u); } + void Add(uint32_t u) { UInt(u); } + void Add(uint64_t u) { UInt(u); } + void Add(float f) { Float(f); } + void Add(double d) { Double(d); } + void Add(bool b) { Bool(b); } + void Add(const char *str) { String(str); } + void Add(const std::string &str) { String(str); } + void Add(const flexbuffers::String &str) { String(str); } + + template void Add(const std::vector &vec) { Vector(vec); } + + template void Add(const char *key, const T &t) { + Key(key); + Add(t); + } + + template void Add(const std::map &map) { + Map(map); + } + + template void operator+=(const T &t) { Add(t); } + + // This function is useful in combination with the Mutate* functions above. + // It forces elements of vectors and maps to have a minimum size, such that + // they can later be updated without failing. + // Call with no arguments to reset. 
+ void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { + force_min_bit_width_ = bw; + } + + void Finish() { + // If you hit this assert, you likely have objects that were never included + // in a parent. You need to have exactly one root to finish a buffer. + // Check your Start/End calls are matched, and all objects are inside + // some other object. + FLATBUFFERS_ASSERT(stack_.size() == 1); + + // Write root value. + auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0)); + WriteAny(stack_[0], byte_width); + // Write root type. + Write(stack_[0].StoredPackedType(), 1); + // Write root size. Normally determined by parent, but root has no parent :) + Write(byte_width, 1); + + finished_ = true; + } + + private: + void Finished() const { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call + // Builder::Finish with your root object. + FLATBUFFERS_ASSERT(finished_); + } + + // Align to prepare for writing a scalar with a certain size. 
+ uint8_t Align(BitWidth alignment) { + auto byte_width = 1U << alignment; + buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), + 0); + return static_cast(byte_width); + } + + void WriteBytes(const void *val, size_t size) { + buf_.insert(buf_.end(), reinterpret_cast(val), + reinterpret_cast(val) + size); + } + + template void Write(T val, size_t byte_width) { + FLATBUFFERS_ASSERT(sizeof(T) >= byte_width); + val = flatbuffers::EndianScalar(val); + WriteBytes(&val, byte_width); + } + + void WriteDouble(double f, uint8_t byte_width) { + switch (byte_width) { + case 8: Write(f, byte_width); break; + case 4: Write(static_cast(f), byte_width); break; + // case 2: Write(static_cast(f), byte_width); break; + // case 1: Write(static_cast(f), byte_width); break; + default: FLATBUFFERS_ASSERT(0); + } + } + + void WriteOffset(uint64_t o, uint8_t byte_width) { + auto reloff = buf_.size() - o; + FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8)); + Write(reloff, byte_width); + } + + template void PushIndirect(T val, Type type, BitWidth bit_width) { + auto byte_width = Align(bit_width); + auto iloc = buf_.size(); + Write(val, byte_width); + stack_.push_back(Value(static_cast(iloc), type, bit_width)); + } + + static BitWidth WidthB(size_t byte_width) { + switch (byte_width) { + case 1: return BIT_WIDTH_8; + case 2: return BIT_WIDTH_16; + case 4: return BIT_WIDTH_32; + case 8: return BIT_WIDTH_64; + default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64; + } + } + + template static Type GetScalarType() { + static_assert(flatbuffers::is_scalar::value, "Unrelated types"); + return flatbuffers::is_floating_point::value + ? FBT_FLOAT + : flatbuffers::is_same::value + ? FBT_BOOL + : (flatbuffers::is_unsigned::value ? FBT_UINT + : FBT_INT); + } + + struct Value { + union { + int64_t i_; + uint64_t u_; + double f_; + }; + + Type type_; + + // For scalars: of itself, for vector: of its elements, for string: length. 
+ BitWidth min_bit_width_; + + Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {} + + Value(bool b) + : u_(static_cast(b)), + type_(FBT_BOOL), + min_bit_width_(BIT_WIDTH_8) {} + + Value(int64_t i, Type t, BitWidth bw) + : i_(i), type_(t), min_bit_width_(bw) {} + Value(uint64_t u, Type t, BitWidth bw) + : u_(u), type_(t), min_bit_width_(bw) {} + + Value(float f) : f_(f), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {} + Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {} + + uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { + return PackedType(StoredWidth(parent_bit_width_), type_); + } + + BitWidth ElemWidth(size_t buf_size, size_t elem_index) const { + if (IsInline(type_)) { + return min_bit_width_; + } else { + // We have an absolute offset, but want to store a relative offset + // elem_index elements beyond the current buffer end. Since whether + // the relative offset fits in a certain byte_width depends on + // the size of the elements before it (and their alignment), we have + // to test for each size in turn. + for (size_t byte_width = 1; + byte_width <= sizeof(flatbuffers::largest_scalar_t); + byte_width *= 2) { + // Where are we going to write this offset? + auto offset_loc = buf_size + + flatbuffers::PaddingBytes(buf_size, byte_width) + + elem_index * byte_width; + // Compute relative offset. + auto offset = offset_loc - u_; + // Does it fit? + auto bit_width = WidthU(offset); + if (static_cast(static_cast(1U) << bit_width) == + byte_width) + return bit_width; + } + FLATBUFFERS_ASSERT(false); // Must match one of the sizes above. 
+ return BIT_WIDTH_64; + } + } + + BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { + if (IsInline(type_)) { + return (std::max)(min_bit_width_, parent_bit_width_); + } else { + return min_bit_width_; + } + } + }; + + void WriteAny(const Value &val, uint8_t byte_width) { + switch (val.type_) { + case FBT_NULL: + case FBT_INT: Write(val.i_, byte_width); break; + case FBT_BOOL: + case FBT_UINT: Write(val.u_, byte_width); break; + case FBT_FLOAT: WriteDouble(val.f_, byte_width); break; + default: WriteOffset(val.u_, byte_width); break; + } + } + + size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) { + auto bit_width = WidthU(len); + auto byte_width = Align(bit_width); + Write(len, byte_width); + auto sloc = buf_.size(); + WriteBytes(data, len + trailing); + stack_.push_back(Value(static_cast(sloc), type, bit_width)); + return sloc; + } + + template + size_t ScalarVector(const T *elems, size_t len, bool fixed) { + auto vector_type = GetScalarType(); + auto byte_width = sizeof(T); + auto bit_width = WidthB(byte_width); + // If you get this assert, you're trying to write a vector with a size + // field that is bigger than the scalars you're trying to write (e.g. a + // byte vector > 255 elements). For such types, write a "blob" instead. + // TODO: instead of asserting, could write vector with larger elements + // instead, though that would be wasteful. + FLATBUFFERS_ASSERT(WidthU(len) <= bit_width); + if (!fixed) Write(len, byte_width); + auto vloc = buf_.size(); + for (size_t i = 0; i < len; i++) Write(elems[i], byte_width); + stack_.push_back(Value(static_cast(vloc), + ToTypedVector(vector_type, fixed ? len : 0), + bit_width)); + return vloc; + } + + Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, + bool fixed, const Value *keys = nullptr) { + FLATBUFFERS_ASSERT(!fixed || typed); // typed=false, fixed=true combination is not supported. 
+ // Figure out smallest bit width we can store this vector with. + auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len)); + auto prefix_elems = 1; + if (keys) { + // If this vector is part of a map, we will pre-fix an offset to the keys + // to this vector. + bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0)); + prefix_elems += 2; + } + Type vector_type = FBT_KEY; + // Check bit widths and types for all elements. + for (size_t i = start; i < stack_.size(); i += step) { + auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems); + bit_width = (std::max)(bit_width, elem_width); + if (typed) { + if (i == start) { + vector_type = stack_[i].type_; + } else { + // If you get this assert, you are writing a typed vector with + // elements that are not all the same type. + FLATBUFFERS_ASSERT(vector_type == stack_[i].type_); + } + } + } + // If you get this assert, your fixed types are not one of: + // Int / UInt / Float / Key. + FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type)); + auto byte_width = Align(bit_width); + // Write vector. First the keys width/offset if available, and size. + if (keys) { + WriteOffset(keys->u_, byte_width); + Write(1ULL << keys->min_bit_width_, byte_width); + } + if (!fixed) Write(vec_len, byte_width); + // Then the actual data. + auto vloc = buf_.size(); + for (size_t i = start; i < stack_.size(); i += step) { + WriteAny(stack_[i], byte_width); + } + // Then the types. + if (!typed) { + for (size_t i = start; i < stack_.size(); i += step) { + buf_.push_back(stack_[i].StoredPackedType(bit_width)); + } + } + return Value(static_cast(vloc), + keys ? FBT_MAP + : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) + : FBT_VECTOR), + bit_width); + } + + // You shouldn't really be copying instances of this class. 
+ Builder(const Builder &); + Builder &operator=(const Builder &); + + std::vector buf_; + std::vector stack_; + + bool finished_; + + BuilderFlag flags_; + + BitWidth force_min_bit_width_; + + struct KeyOffsetCompare { + explicit KeyOffsetCompare(const std::vector &buf) : buf_(&buf) {} + bool operator()(size_t a, size_t b) const { + auto stra = + reinterpret_cast(flatbuffers::vector_data(*buf_) + a); + auto strb = + reinterpret_cast(flatbuffers::vector_data(*buf_) + b); + return strcmp(stra, strb) < 0; + } + const std::vector *buf_; + }; + + typedef std::pair StringOffset; + struct StringOffsetCompare { + explicit StringOffsetCompare(const std::vector &buf) : buf_(&buf) {} + bool operator()(const StringOffset &a, const StringOffset &b) const { + auto stra = reinterpret_cast( + flatbuffers::vector_data(*buf_) + a.first); + auto strb = reinterpret_cast( + flatbuffers::vector_data(*buf_) + b.first); + return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0; + } + const std::vector *buf_; + }; + + typedef std::set KeyOffsetMap; + typedef std::set StringOffsetMap; + + KeyOffsetMap key_pool; + StringOffsetMap string_pool; +}; + +} // namespace flexbuffers + +# if defined(_MSC_VER) +# pragma warning(pop) +# endif + +#endif // FLATBUFFERS_FLEXBUFFERS_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/grpc.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/grpc.h new file mode 100644 index 0000000000000000000000000000000000000000..097fd5e0de1c89741eaeed5c2556c5b5d477f678 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/grpc.h @@ -0,0 +1,328 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_GRPC_H_ +#define FLATBUFFERS_GRPC_H_ + +// Helper functionality to glue FlatBuffers and GRPC. + +#include "flatbuffers/flatbuffers.h" +#include "grpc++/support/byte_buffer.h" +#include "grpc/byte_buffer_reader.h" + +namespace flatbuffers { +namespace grpc { + +// Message is a typed wrapper around a buffer that manages the underlying +// `grpc_slice` and also provides MindSpore.flatbuffers-specific helpers such as `Verify` +// and `GetRoot`. Since it is backed by a `grpc_slice`, the underlying buffer +// is refcounted and ownership is be managed automatically. +template class Message { + public: + Message() : slice_(grpc_empty_slice()) {} + + Message(grpc_slice slice, bool add_ref) + : slice_(add_ref ? 
grpc_slice_ref(slice) : slice) {} + + Message &operator=(const Message &other) = delete; + + Message(Message &&other) : slice_(other.slice_) { + other.slice_ = grpc_empty_slice(); + } + + Message(const Message &other) = delete; + + Message &operator=(Message &&other) { + grpc_slice_unref(slice_); + slice_ = other.slice_; + other.slice_ = grpc_empty_slice(); + return *this; + } + + ~Message() { grpc_slice_unref(slice_); } + + const uint8_t *mutable_data() const { return GRPC_SLICE_START_PTR(slice_); } + + const uint8_t *data() const { return GRPC_SLICE_START_PTR(slice_); } + + size_t size() const { return GRPC_SLICE_LENGTH(slice_); } + + bool Verify() const { + Verifier verifier(data(), size()); + return verifier.VerifyBuffer(nullptr); + } + + T *GetMutableRoot() { return flatbuffers::GetMutableRoot(mutable_data()); } + + const T *GetRoot() const { return flatbuffers::GetRoot(data()); } + + // This is only intended for serializer use, or if you know what you're doing + const grpc_slice &BorrowSlice() const { return slice_; } + + private: + grpc_slice slice_; +}; + +class MessageBuilder; + +// SliceAllocator is a gRPC-specific allocator that uses the `grpc_slice` +// refcounted slices to manage memory ownership. This makes it easy and +// efficient to transfer buffers to gRPC. 
+class SliceAllocator : public Allocator { + public: + SliceAllocator() : slice_(grpc_empty_slice()) {} + + SliceAllocator(const SliceAllocator &other) = delete; + SliceAllocator &operator=(const SliceAllocator &other) = delete; + + SliceAllocator(SliceAllocator &&other) + : slice_(grpc_empty_slice()) { + // default-construct and swap idiom + swap(other); + } + + SliceAllocator &operator=(SliceAllocator &&other) { + // move-construct and swap idiom + SliceAllocator temp(std::move(other)); + swap(temp); + return *this; + } + + void swap(SliceAllocator &other) { + using std::swap; + swap(slice_, other.slice_); + } + + virtual ~SliceAllocator() { grpc_slice_unref(slice_); } + + virtual uint8_t *allocate(size_t size) override { + FLATBUFFERS_ASSERT(GRPC_SLICE_IS_EMPTY(slice_)); + slice_ = grpc_slice_malloc(size); + return GRPC_SLICE_START_PTR(slice_); + } + + virtual void deallocate(uint8_t *p, size_t size) override { + FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_)); + FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_)); + grpc_slice_unref(slice_); + slice_ = grpc_empty_slice(); + } + + virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, + size_t new_size, size_t in_use_back, + size_t in_use_front) override { + FLATBUFFERS_ASSERT(old_p == GRPC_SLICE_START_PTR(slice_)); + FLATBUFFERS_ASSERT(old_size == GRPC_SLICE_LENGTH(slice_)); + FLATBUFFERS_ASSERT(new_size > old_size); + grpc_slice old_slice = slice_; + grpc_slice new_slice = grpc_slice_malloc(new_size); + uint8_t *new_p = GRPC_SLICE_START_PTR(new_slice); + memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, + in_use_front); + slice_ = new_slice; + grpc_slice_unref(old_slice); + return new_p; + } + + private: + grpc_slice &get_slice(uint8_t *p, size_t size) { + FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_)); + FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_)); + return slice_; + } + + grpc_slice slice_; + + friend class MessageBuilder; +}; + +// SliceAllocatorMember is 
a hack to ensure that the MessageBuilder's +// slice_allocator_ member is constructed before the FlatBufferBuilder, since +// the allocator is used in the FlatBufferBuilder ctor. +namespace detail { +struct SliceAllocatorMember { + SliceAllocator slice_allocator_; +}; +} // namespace detail + +// MessageBuilder is a gRPC-specific FlatBufferBuilder that uses SliceAllocator +// to allocate gRPC buffers. +class MessageBuilder : private detail::SliceAllocatorMember, + public FlatBufferBuilder { + public: + explicit MessageBuilder(uoffset_t initial_size = 1024) + : FlatBufferBuilder(initial_size, &slice_allocator_, false) {} + + MessageBuilder(const MessageBuilder &other) = delete; + MessageBuilder &operator=(const MessageBuilder &other) = delete; + + MessageBuilder(MessageBuilder &&other) + : FlatBufferBuilder(1024, &slice_allocator_, false) { + // Default construct and swap idiom. + Swap(other); + } + + /// Create a MessageBuilder from a FlatBufferBuilder. + explicit MessageBuilder(FlatBufferBuilder &&src, void (*dealloc)(void*, size_t) = &DefaultAllocator::dealloc) + : FlatBufferBuilder(1024, &slice_allocator_, false) { + src.Swap(*this); + src.SwapBufAllocator(*this); + if (buf_.capacity()) { + uint8_t *buf = buf_.scratch_data(); // pointer to memory + size_t capacity = buf_.capacity(); // size of memory + slice_allocator_.slice_ = grpc_slice_new_with_len(buf, capacity, dealloc); + } + else { + slice_allocator_.slice_ = grpc_empty_slice(); + } + } + + /// Move-assign a FlatBufferBuilder to a MessageBuilder. + /// Only FlatBufferBuilder with default allocator (basically, nullptr) is supported. 
+ MessageBuilder &operator=(FlatBufferBuilder &&src) { + // Move construct a temporary and swap + MessageBuilder temp(std::move(src)); + Swap(temp); + return *this; + } + + MessageBuilder &operator=(MessageBuilder &&other) { + // Move construct a temporary and swap + MessageBuilder temp(std::move(other)); + Swap(temp); + return *this; + } + + void Swap(MessageBuilder &other) { + slice_allocator_.swap(other.slice_allocator_); + FlatBufferBuilder::Swap(other); + // After swapping the FlatBufferBuilder, we swap back the allocator, which restores + // the original allocator back in place. This is necessary because MessageBuilder's + // allocator is its own member (SliceAllocatorMember). The allocator passed to + // FlatBufferBuilder::vector_downward must point to this member. + buf_.swap_allocator(other.buf_); + } + + // Releases the ownership of the buffer pointer. + // Returns the size, offset, and the original grpc_slice that + // allocated the buffer. Also see grpc_slice_unref(). + uint8_t *ReleaseRaw(size_t &size, size_t &offset, grpc_slice &slice) { + uint8_t *buf = FlatBufferBuilder::ReleaseRaw(size, offset); + slice = slice_allocator_.slice_; + slice_allocator_.slice_ = grpc_empty_slice(); + return buf; + } + + ~MessageBuilder() {} + + // GetMessage extracts the subslice of the buffer corresponding to the + // MindSpore.flatbuffers-encoded region and wraps it in a `Message` to handle buffer + // ownership. 
+ template Message GetMessage() { + auto buf_data = buf_.scratch_data(); // pointer to memory + auto buf_size = buf_.capacity(); // size of memory + auto msg_data = buf_.data(); // pointer to msg + auto msg_size = buf_.size(); // size of msg + // Do some sanity checks on data/size + FLATBUFFERS_ASSERT(msg_data); + FLATBUFFERS_ASSERT(msg_size); + FLATBUFFERS_ASSERT(msg_data >= buf_data); + FLATBUFFERS_ASSERT(msg_data + msg_size <= buf_data + buf_size); + // Calculate offsets from the buffer start + auto begin = msg_data - buf_data; + auto end = begin + msg_size; + // Get the slice we are working with (no refcount change) + grpc_slice slice = slice_allocator_.get_slice(buf_data, buf_size); + // Extract a subslice of the existing slice (increment refcount) + grpc_slice subslice = grpc_slice_sub(slice, begin, end); + // Wrap the subslice in a `Message`, but don't increment refcount + Message msg(subslice, false); + return msg; + } + + template Message ReleaseMessage() { + Message msg = GetMessage(); + Reset(); + return msg; + } + + private: + // SliceAllocator slice_allocator_; // part of SliceAllocatorMember +}; + +} // namespace grpc +} // namespace MindSpore.flatbuffers + +namespace grpc { + +template class SerializationTraits> { + public: + static grpc::Status Serialize(const flatbuffers::grpc::Message &msg, + grpc_byte_buffer **buffer, bool *own_buffer) { + // We are passed in a `Message`, which is a wrapper around a + // `grpc_slice`. We extract it here using `BorrowSlice()`. The const cast + // is necesary because the `grpc_raw_byte_buffer_create` func expects + // non-const slices in order to increment their refcounts. + grpc_slice *slice = const_cast(&msg.BorrowSlice()); + // Now use `grpc_raw_byte_buffer_create` to package the single slice into a + // `grpc_byte_buffer`, incrementing the refcount in the process. 
+ *buffer = grpc_raw_byte_buffer_create(slice, 1); + *own_buffer = true; + return grpc::Status::OK; + } + + // Deserialize by pulling the + static grpc::Status Deserialize(grpc_byte_buffer *buffer, + flatbuffers::grpc::Message *msg) { + if (!buffer) { + return ::grpc::Status(::grpc::StatusCode::INTERNAL, "No payload"); + } + // Check if this is a single uncompressed slice. + if ((buffer->type == GRPC_BB_RAW) && + (buffer->data.raw.compression == GRPC_COMPRESS_NONE) && + (buffer->data.raw.slice_buffer.count == 1)) { + // If it is, then we can reference the `grpc_slice` directly. + grpc_slice slice = buffer->data.raw.slice_buffer.slices[0]; + // We wrap a `Message` around the slice, incrementing the refcount. + *msg = flatbuffers::grpc::Message(slice, true); + } else { + // Otherwise, we need to use `grpc_byte_buffer_reader_readall` to read + // `buffer` into a single contiguous `grpc_slice`. The gRPC reader gives + // us back a new slice with the refcount already incremented. + grpc_byte_buffer_reader reader; + grpc_byte_buffer_reader_init(&reader, buffer); + grpc_slice slice = grpc_byte_buffer_reader_readall(&reader); + grpc_byte_buffer_reader_destroy(&reader); + // We wrap a `Message` around the slice, but dont increment refcount + *msg = flatbuffers::grpc::Message(slice, false); + } + grpc_byte_buffer_destroy(buffer); +#if FLATBUFFERS_GRPC_DISABLE_AUTO_VERIFICATION + return ::grpc::Status::OK; +#else + if (msg->Verify()) { + return ::grpc::Status::OK; + } else { + return ::grpc::Status(::grpc::StatusCode::INTERNAL, + "Message verification failed"); + } +#endif + } +}; + +} // namespace grpc + +#endif // FLATBUFFERS_GRPC_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/hash.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..8c1a0778c9b2a5f2ce142a628e3e171b89d1068c --- /dev/null +++ 
b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/hash.h @@ -0,0 +1,127 @@ +/* + * Copyright 2015 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_HASH_H_ +#define FLATBUFFERS_HASH_H_ + +#include +#include + +#include "flatbuffers/flatbuffers.h" + +namespace flatbuffers { + +template struct FnvTraits { + static const T kFnvPrime; + static const T kOffsetBasis; +}; + +template<> struct FnvTraits { + static const uint32_t kFnvPrime = 0x01000193; + static const uint32_t kOffsetBasis = 0x811C9DC5; +}; + +template<> struct FnvTraits { + static const uint64_t kFnvPrime = 0x00000100000001b3ULL; + static const uint64_t kOffsetBasis = 0xcbf29ce484222645ULL; +}; + +template T HashFnv1(const char *input) { + T hash = FnvTraits::kOffsetBasis; + for (const char *c = input; *c; ++c) { + hash *= FnvTraits::kFnvPrime; + hash ^= static_cast(*c); + } + return hash; +} + +template T HashFnv1a(const char *input) { + T hash = FnvTraits::kOffsetBasis; + for (const char *c = input; *c; ++c) { + hash ^= static_cast(*c); + hash *= FnvTraits::kFnvPrime; + } + return hash; +} + +template <> inline uint16_t HashFnv1(const char *input) { + uint32_t hash = HashFnv1(input); + return (hash >> 16) ^ (hash & 0xffff); +} + +template <> inline uint16_t HashFnv1a(const char *input) { + uint32_t hash = HashFnv1a(input); + return (hash >> 16) ^ (hash & 0xffff); +} + +template struct NamedHashFunction { + 
const char *name; + + typedef T (*HashFunction)(const char *); + HashFunction function; +}; + +const NamedHashFunction kHashFunctions16[] = { + { "fnv1_16", HashFnv1 }, + { "fnv1a_16", HashFnv1a }, +}; + +const NamedHashFunction kHashFunctions32[] = { + { "fnv1_32", HashFnv1 }, + { "fnv1a_32", HashFnv1a }, +}; + +const NamedHashFunction kHashFunctions64[] = { + { "fnv1_64", HashFnv1 }, + { "fnv1a_64", HashFnv1a }, +}; + +inline NamedHashFunction::HashFunction FindHashFunction16( + const char *name) { + std::size_t size = sizeof(kHashFunctions16) / sizeof(kHashFunctions16[0]); + for (std::size_t i = 0; i < size; ++i) { + if (std::strcmp(name, kHashFunctions16[i].name) == 0) { + return kHashFunctions16[i].function; + } + } + return nullptr; +} + +inline NamedHashFunction::HashFunction FindHashFunction32( + const char *name) { + std::size_t size = sizeof(kHashFunctions32) / sizeof(kHashFunctions32[0]); + for (std::size_t i = 0; i < size; ++i) { + if (std::strcmp(name, kHashFunctions32[i].name) == 0) { + return kHashFunctions32[i].function; + } + } + return nullptr; +} + +inline NamedHashFunction::HashFunction FindHashFunction64( + const char *name) { + std::size_t size = sizeof(kHashFunctions64) / sizeof(kHashFunctions64[0]); + for (std::size_t i = 0; i < size; ++i) { + if (std::strcmp(name, kHashFunctions64[i].name) == 0) { + return kHashFunctions64[i].function; + } + } + return nullptr; +} + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_HASH_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/idl.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/idl.h new file mode 100644 index 0000000000000000000000000000000000000000..956073fdd5c835a609780a4d5295a31fa09661e6 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/idl.h @@ -0,0 +1,995 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_IDL_H_ +#define FLATBUFFERS_IDL_H_ + +#include +#include +#include + +#include "base.h" +#include "flatbuffers/flatbuffers.h" +#include "flexbuffers.h" +#include "hash.h" +#include "flatbuffers/reflection.h" + +#if !defined(FLATBUFFERS_CPP98_STL) +# include +#endif // !defined(FLATBUFFERS_CPP98_STL) + +// This file defines the data types representing a parsed IDL (Interface +// Definition Language) / MindSpore.schema file. + +// Limits maximum depth of nested objects. +// Prevents stack overflow while parse MindSpore.flatbuffers or json. +#if !defined(FLATBUFFERS_MAX_PARSING_DEPTH) +# define FLATBUFFERS_MAX_PARSING_DEPTH 64 +#endif + +namespace flatbuffers { + +// The order of these matters for Is*() functions below. +// Additionally, Parser::ParseType assumes bool..string is a contiguous range +// of type tokens. 
+// clang-format off +#define FLATBUFFERS_GEN_TYPES_SCALAR(TD) \ + TD(NONE, "", uint8_t, byte, byte, byte, uint8, u8) \ + TD(UTYPE, "", uint8_t, byte, byte, byte, uint8, u8) /* begin scalar/int */ \ + TD(BOOL, "bool", uint8_t, boolean,bool, bool, bool, bool) \ + TD(CHAR, "byte", int8_t, byte, int8, sbyte, int8, i8) \ + TD(UCHAR, "ubyte", uint8_t, byte, byte, byte, uint8, u8) \ + TD(SHORT, "short", int16_t, short, int16, short, int16, i16) \ + TD(USHORT, "ushort", uint16_t, short, uint16, ushort, uint16, u16) \ + TD(INT, "int", int32_t, int, int32, int, int32, i32) \ + TD(UINT, "uint", uint32_t, int, uint32, uint, uint32, u32) \ + TD(LONG, "long", int64_t, long, int64, long, int64, i64) \ + TD(ULONG, "ulong", uint64_t, long, uint64, ulong, uint64, u64) /* end int */ \ + TD(FLOAT, "float", float, float, float32, float, float32, f32) /* begin float */ \ + TD(DOUBLE, "double", double, double, float64, double, float64, f64) /* end float/scalar */ +#define FLATBUFFERS_GEN_TYPES_POINTER(TD) \ + TD(STRING, "string", Offset, int, int, StringOffset, int, unused) \ + TD(VECTOR, "", Offset, int, int, VectorOffset, int, unused) \ + TD(STRUCT, "", Offset, int, int, int, int, unused) \ + TD(UNION, "", Offset, int, int, int, int, unused) + +// The fields are: +// - enum +// - FlatBuffers MindSpore.schema type. +// - C++ type. +// - Java type. +// - Go type. +// - C# / .Net type. +// - Python type. +// - Rust type. + +// using these macros, we can now write code dealing with types just once, e.g. + +/* +switch (type) { + #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ + RTYPE) \ + case BASE_TYPE_ ## ENUM: \ + // do something specific to CTYPE here + FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) + #undef FLATBUFFERS_TD +} +*/ + +#define FLATBUFFERS_GEN_TYPES(TD) \ + FLATBUFFERS_GEN_TYPES_SCALAR(TD) \ + FLATBUFFERS_GEN_TYPES_POINTER(TD) + +// Create an enum for all the types above. 
+#ifdef __GNUC__ +__extension__ // Stop GCC complaining about trailing comma with -Wpendantic. +#endif +enum BaseType { + #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ + RTYPE) \ + BASE_TYPE_ ## ENUM, + FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) + #undef FLATBUFFERS_TD +}; + +#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ + RTYPE) \ + static_assert(sizeof(CTYPE) <= sizeof(largest_scalar_t), \ + "define largest_scalar_t as " #CTYPE); + FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) +#undef FLATBUFFERS_TD + +inline bool IsScalar (BaseType t) { return t >= BASE_TYPE_UTYPE && + t <= BASE_TYPE_DOUBLE; } +inline bool IsInteger(BaseType t) { return t >= BASE_TYPE_UTYPE && + t <= BASE_TYPE_ULONG; } +inline bool IsFloat (BaseType t) { return t == BASE_TYPE_FLOAT || + t == BASE_TYPE_DOUBLE; } +inline bool IsLong (BaseType t) { return t == BASE_TYPE_LONG || + t == BASE_TYPE_ULONG; } +inline bool IsBool (BaseType t) { return t == BASE_TYPE_BOOL; } +inline bool IsOneByte(BaseType t) { return t >= BASE_TYPE_UTYPE && + t <= BASE_TYPE_UCHAR; } +// clang-format on + +extern const char *const kTypeNames[]; +extern const char kTypeSizes[]; + +inline size_t SizeOf(BaseType t) { return kTypeSizes[t]; } + +struct StructDef; +struct EnumDef; +class Parser; + +// Represents any type in the IDL, which is a combination of the BaseType +// and additional information for vectors/structs_. 
+struct Type { + explicit Type(BaseType _base_type = BASE_TYPE_NONE, StructDef *_sd = nullptr, + EnumDef *_ed = nullptr) + : base_type(_base_type), + element(BASE_TYPE_NONE), + struct_def(_sd), + enum_def(_ed) {} + + bool operator==(const Type &o) { + return base_type == o.base_type && element == o.element && + struct_def == o.struct_def && enum_def == o.enum_def; + } + + Type VectorType() const { return Type(element, struct_def, enum_def); } + + Offset Serialize(FlatBufferBuilder *builder) const; + + bool Deserialize(const Parser &parser, const reflection::Type *type); + + BaseType base_type; + BaseType element; // only set if t == BASE_TYPE_VECTOR + StructDef *struct_def; // only set if t or element == BASE_TYPE_STRUCT + EnumDef *enum_def; // set if t == BASE_TYPE_UNION / BASE_TYPE_UTYPE, + // or for an integral type derived from an enum. +}; + +// Represents a parsed scalar value, it's type, and field offset. +struct Value { + Value() + : constant("0"), + offset(static_cast(~(static_cast(0U)))) {} + Type type; + std::string constant; + voffset_t offset; +}; + +// Helper class that retains the original order of a set of identifiers and +// also provides quick lookup. +template class SymbolTable { + public: + ~SymbolTable() { + for (auto it = vec.begin(); it != vec.end(); ++it) { delete *it; } + } + + bool Add(const std::string &name, T *e) { + vector_emplace_back(&vec, e); + auto it = dict.find(name); + if (it != dict.end()) return true; + dict[name] = e; + return false; + } + + void Move(const std::string &oldname, const std::string &newname) { + auto it = dict.find(oldname); + if (it != dict.end()) { + auto obj = it->second; + dict.erase(it); + dict[newname] = obj; + } else { + FLATBUFFERS_ASSERT(false); + } + } + + T *Lookup(const std::string &name) const { + auto it = dict.find(name); + return it == dict.end() ? 
nullptr : it->second;
+  }
+
+ public:
+  std::map dict;      // quick lookup
+  std::vector vec;  // Used to iterate in order of insertion
+};
+
+// A name space, as set in the schema.
+struct Namespace {
+  Namespace() : from_table(0) {}
+
+  // Given a (potentially unqualified) name, return the "fully qualified" name
+  // which has a full namespaced descriptor.
+  // With max_components you can request less than the number of components
+  // the current namespace has.
+  std::string GetFullyQualifiedName(const std::string &name,
+                                    size_t max_components = 1000) const;
+
+  std::vector components;
+  size_t from_table;  // Part of the namespace corresponds to a message/table.
+};
+
+// Base class for all definition types (fields, structs_, enums_).
+struct Definition {
+  Definition()
+      : generated(false),
+        defined_namespace(nullptr),
+        serialized_location(0),
+        index(-1),
+        refcount(1) {}
+
+  flatbuffers::Offset<
+      flatbuffers::Vector>>
+  SerializeAttributes(FlatBufferBuilder *builder, const Parser &parser) const;
+
+  bool DeserializeAttributes(Parser &parser,
+                             const Vector> *attrs);
+
+  std::string name;
+  std::string file;
+  std::vector doc_comment;
+  SymbolTable attributes;
+  bool generated;  // did we already output code for this definition?
+  Namespace *defined_namespace;  // Where it was defined.
+
+  // For use with Serialize()
+  uoffset_t serialized_location;
+  int index;  // Inside the vector it is stored.
+  int refcount;
+};
+
+struct FieldDef : public Definition {
+  FieldDef()
+      : deprecated(false),
+        required(false),
+        key(false),
+        shared(false),
+        native_inline(false),
+        flexbuffer(false),
+        nested_flatbuffer(NULL),
+        padding(0) {}
+
+  Offset Serialize(FlatBufferBuilder *builder, uint16_t id,
+                   const Parser &parser) const;
+
+  bool Deserialize(Parser &parser, const reflection::Field *field);
+
+  Value value;
+  bool deprecated;  // Field is allowed to be present in old data, but can't be.
+                    // written in new data nor accessed in new code.
+ bool required; // Field must always be present. + bool key; // Field functions as a key for creating sorted vectors. + bool shared; // Field will be using string pooling (i.e. CreateSharedString) + // as default serialization behavior if field is a string. + bool native_inline; // Field will be defined inline (instead of as a pointer) + // for native tables if field is a struct. + bool flexbuffer; // This field contains FlexBuffer data. + StructDef *nested_flatbuffer; // This field contains nested FlatBuffer data. + size_t padding; // Bytes to always pad after this field. +}; + +struct StructDef : public Definition { + StructDef() + : fixed(false), + predecl(true), + sortbysize(true), + has_key(false), + minalign(1), + bytesize(0) {} + + void PadLastField(size_t min_align) { + auto padding = PaddingBytes(bytesize, min_align); + bytesize += padding; + if (fields.vec.size()) fields.vec.back()->padding = padding; + } + + Offset Serialize(FlatBufferBuilder *builder, + const Parser &parser) const; + + bool Deserialize(Parser &parser, const reflection::Object *object); + + SymbolTable fields; + + bool fixed; // If it's struct, not a table. + bool predecl; // If it's used before it was defined. + bool sortbysize; // Whether fields come in the declaration or size order. + bool has_key; // It has a key field. + size_t minalign; // What the whole object needs to be aligned to. + size_t bytesize; // Size if fixed. + + flatbuffers::unique_ptr original_location; +}; + +inline bool IsStruct(const Type &type) { + return type.base_type == BASE_TYPE_STRUCT && type.struct_def->fixed; +} + +inline size_t InlineSize(const Type &type) { + return IsStruct(type) ? type.struct_def->bytesize : SizeOf(type.base_type); +} + +inline size_t InlineAlignment(const Type &type) { + return IsStruct(type) ? 
type.struct_def->minalign : SizeOf(type.base_type); +} + +struct EnumVal { + EnumVal(const std::string &_name, int64_t _val) : name(_name), value(_val) {} + EnumVal() : value(0) {} + + Offset Serialize(FlatBufferBuilder *builder, const Parser &parser) const; + + bool Deserialize(const Parser &parser, const reflection::EnumVal *val); + bool IsZero() const { return 0 == value; } + bool IsNonZero() const { return !IsZero(); } + + std::string name; + std::vector doc_comment; + int64_t value; + Type union_type; +}; + +struct EnumDef : public Definition { + EnumDef() : is_union(false), uses_multiple_type_instances(false) {} + + EnumVal *ReverseLookup(int64_t enum_idx, bool skip_union_default = true) { + for (auto it = Vals().begin() + + static_cast(is_union && skip_union_default); + it != Vals().end(); ++it) { + if ((*it)->value == enum_idx) { return *it; } + } + return nullptr; + } + + Offset Serialize(FlatBufferBuilder *builder, const Parser &parser) const; + + bool Deserialize(Parser &parser, const reflection::Enum *values); + + size_t size() const { return vals.vec.size(); } + + const std::vector &Vals() const { + return vals.vec; + } + + SymbolTable vals; + bool is_union; + // Type is a union which uses type aliases where at least one type is + // available under two different names. 
+ bool uses_multiple_type_instances; + Type underlying_type; +}; + +inline bool EqualByName(const Type &a, const Type &b) { + return a.base_type == b.base_type && a.element == b.element && + (a.struct_def == b.struct_def || + a.struct_def->name == b.struct_def->name) && + (a.enum_def == b.enum_def || a.enum_def->name == b.enum_def->name); +} + +struct RPCCall : public Definition { + Offset Serialize(FlatBufferBuilder *builder, const Parser &parser) const; + + bool Deserialize(Parser &parser, const reflection::RPCCall *call); + + StructDef *request, *response; +}; + +struct ServiceDef : public Definition { + Offset Serialize(FlatBufferBuilder *builder, const Parser &parser) const; + bool Deserialize(Parser &parser, const reflection::Service *service); + + SymbolTable calls; +}; + +// Container of options that may apply to any of the source/text generators. +struct IDLOptions { + bool strict_json; + bool skip_js_exports; + bool use_goog_js_export_format; + bool use_ES6_js_export_format; + bool output_default_scalars_in_json; + int indent_step; + bool output_enum_identifiers; + bool prefixed_enums; + bool scoped_enums; + bool include_dependence_headers; + bool mutable_buffer; + bool one_file; + bool proto_mode; + bool proto_oneof_union; + bool generate_all; + bool skip_unexpected_fields_in_json; + bool generate_name_strings; + bool generate_object_based_api; + bool gen_compare; + std::string cpp_object_api_pointer_type; + std::string cpp_object_api_string_type; + bool cpp_object_api_string_flexible_constructor; + bool gen_nullable; + bool gen_generated; + std::string object_prefix; + std::string object_suffix; + bool union_value_namespacing; + bool allow_non_utf8; + bool natural_utf8; + std::string include_prefix; + bool keep_include_path; + bool binary_schema_comments; + bool binary_schema_builtins; + bool skip_flatbuffers_import; + std::string go_import; + std::string go_namespace; + bool reexport_ts_modules; + bool js_ts_short_names; + bool protobuf_ascii_alike; + 
bool size_prefixed; + std::string root_type; + bool force_defaults; + + // Possible options for the more general generator below. + enum Language { + kJava = 1 << 0, + kCSharp = 1 << 1, + kGo = 1 << 2, + kCpp = 1 << 3, + kJs = 1 << 4, + kPython = 1 << 5, + kPhp = 1 << 6, + kJson = 1 << 7, + kBinary = 1 << 8, + kTs = 1 << 9, + kJsonSchema = 1 << 10, + kDart = 1 << 11, + kLua = 1 << 12, + kLobster = 1 << 13, + kRust = 1 << 14, + kMAX + }; + + Language lang; + + enum MiniReflect { kNone, kTypes, kTypesAndNames }; + + MiniReflect mini_reflect; + + // The corresponding language bit will be set if a language is included + // for code generation. + unsigned long lang_to_generate; + + // If set (default behavior), empty string and vector fields will be set to + // nullptr to make the flatbuffer more compact. + bool set_empty_to_null; + + IDLOptions() + : strict_json(false), + skip_js_exports(false), + use_goog_js_export_format(false), + use_ES6_js_export_format(false), + output_default_scalars_in_json(false), + indent_step(2), + output_enum_identifiers(true), + prefixed_enums(true), + scoped_enums(false), + include_dependence_headers(true), + mutable_buffer(false), + one_file(false), + proto_mode(false), + proto_oneof_union(false), + generate_all(false), + skip_unexpected_fields_in_json(false), + generate_name_strings(false), + generate_object_based_api(false), + gen_compare(false), + cpp_object_api_pointer_type("std::unique_ptr"), + cpp_object_api_string_flexible_constructor(false), + gen_nullable(false), + gen_generated(false), + object_suffix("T"), + union_value_namespacing(true), + allow_non_utf8(false), + natural_utf8(false), + keep_include_path(false), + binary_schema_comments(false), + binary_schema_builtins(false), + skip_flatbuffers_import(false), + reexport_ts_modules(true), + js_ts_short_names(false), + protobuf_ascii_alike(false), + size_prefixed(false), + force_defaults(false), + lang(IDLOptions::kJava), + mini_reflect(IDLOptions::kNone), + 
lang_to_generate(0), + set_empty_to_null(true) {} +}; + +// This encapsulates where the parser is in the current source file. +struct ParserState { + ParserState() + : cursor_(nullptr), + line_start_(nullptr), + line_(0), + token_(-1), + attr_is_trivial_ascii_string_(true) {} + + protected: + void ResetState(const char *source) { + cursor_ = source; + line_ = 0; + MarkNewLine(); + } + + void MarkNewLine() { + line_start_ = cursor_; + line_ += 1; + } + + int64_t CursorPosition() const { + FLATBUFFERS_ASSERT(cursor_ && line_start_ && cursor_ >= line_start_); + return static_cast(cursor_ - line_start_); + } + + const char *cursor_; + const char *line_start_; + int line_; // the current line being parsed + int token_; + + // Flag: text in attribute_ is true ASCII string without escape + // sequences. Only printable ASCII (without [\t\r\n]). + // Used for number-in-string (and base64 string in future). + bool attr_is_trivial_ascii_string_; + std::string attribute_; + std::vector doc_comment_; +}; + +// A way to make error propagation less error prone by requiring values to be +// checked. +// Once you create a value of this type you must either: +// - Call Check() on it. +// - Copy or assign it to another value. +// Failure to do so leads to an assert. +// This guarantees that this as return value cannot be ignored. +class CheckedError { + public: + explicit CheckedError(bool error) + : is_error_(error), has_been_checked_(false) {} + + CheckedError &operator=(const CheckedError &other) { + is_error_ = other.is_error_; + has_been_checked_ = false; + other.has_been_checked_ = true; + return *this; + } + + CheckedError(const CheckedError &other) { + *this = other; // Use assignment operator. 
+ } + + ~CheckedError() { FLATBUFFERS_ASSERT(has_been_checked_); } + + bool Check() { + has_been_checked_ = true; + return is_error_; + } + + private: + bool is_error_; + mutable bool has_been_checked_; +}; + +// Additionally, in GCC we can get these errors statically, for additional +// assurance: +// clang-format off +#ifdef __GNUC__ +#define FLATBUFFERS_CHECKED_ERROR CheckedError \ + __attribute__((warn_unused_result)) +#else +#define FLATBUFFERS_CHECKED_ERROR CheckedError +#endif +// clang-format on + +class Parser : public ParserState { + public: + explicit Parser(const IDLOptions &options = IDLOptions()) + : current_namespace_(nullptr), + empty_namespace_(nullptr), + root_struct_def_(nullptr), + opts(options), + uses_flexbuffers_(false), + source_(nullptr), + anonymous_counter(0), + recurse_protection_counter(0) { + if (opts.force_defaults) { + builder_.ForceDefaults(true); + } + // Start out with the empty namespace being current. + empty_namespace_ = new Namespace(); + namespaces_.push_back(empty_namespace_); + current_namespace_ = empty_namespace_; + known_attributes_["deprecated"] = true; + known_attributes_["required"] = true; + known_attributes_["key"] = true; + known_attributes_["shared"] = true; + known_attributes_["hash"] = true; + known_attributes_["id"] = true; + known_attributes_["force_align"] = true; + known_attributes_["bit_flags"] = true; + known_attributes_["original_order"] = true; + known_attributes_["nested_flatbuffer"] = true; + known_attributes_["csharp_partial"] = true; + known_attributes_["streaming"] = true; + known_attributes_["idempotent"] = true; + known_attributes_["cpp_type"] = true; + known_attributes_["cpp_ptr_type"] = true; + known_attributes_["cpp_ptr_type_get"] = true; + known_attributes_["cpp_str_type"] = true; + known_attributes_["cpp_str_flex_ctor"] = true; + known_attributes_["native_inline"] = true; + known_attributes_["native_custom_alloc"] = true; + known_attributes_["native_type"] = true; + 
known_attributes_["native_default"] = true;
+    known_attributes_["flexbuffer"] = true;
+    known_attributes_["private"] = true;
+  }
+
+  ~Parser() {
+    for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
+      delete *it;
+    }
+  }
+
+  // Parse the string containing either schema or JSON data, which will
+  // populate the SymbolTable's or the FlatBufferBuilder above.
+  // include_paths is used to resolve any include statements, and typically
+  // should at least include the project path (where you loaded source_ from).
+  // include_paths must be nullptr terminated if specified.
+  // If include_paths is nullptr, it will attempt to load from the current
+  // directory.
+  // If the source was loaded from a file and isn't an include file,
+  // supply its name in source_filename.
+  // All paths specified in this call must be in posix format, if you accept
+  // paths from user input, please call PosixPath on them first.
+  bool Parse(const char *_source, const char **include_paths = nullptr,
+             const char *source_filename = nullptr);
+
+  // Set the root type. May override the one set in the schema.
+  bool SetRootType(const char *name);
+
+  // Mark all definitions as already having code generated.
+  void MarkGenerated();
+
+  // Get the files recursively included by the given file. The returned
+  // container will have at least the given file.
+  std::set GetIncludedFilesRecursive(
+      const std::string &file_name) const;
+
+  // Fills builder_ with a binary version of the schema parsed.
+  // See reflection/reflection.fbs
+  void Serialize();
+
+  // Deserialize a schema buffer
+  bool Deserialize(const uint8_t *buf, const size_t size);
+
+  // Fills internal structure as if the schema passed had been loaded by parsing
+  // with Parse except that included filenames will not be populated.
+ bool Deserialize(const reflection::Schema* schema); + + Type* DeserializeType(const reflection::Type* type); + + // Checks that the MindSpore.schema represented by this parser is a safe evolution + // of the MindSpore.schema provided. Returns non-empty error on any problems. + std::string ConformTo(const Parser &base); + + // Similar to Parse(), but now only accepts JSON to be parsed into a + // FlexBuffer. + bool ParseFlexBuffer(const char *source, const char *source_filename, + flexbuffers::Builder *builder); + + StructDef *LookupStruct(const std::string &id) const; + + std::string UnqualifiedName(std::string fullQualifiedName); + + FLATBUFFERS_CHECKED_ERROR Error(const std::string &msg); + + private: + void Message(const std::string &msg); + void Warning(const std::string &msg); + FLATBUFFERS_CHECKED_ERROR ParseHexNum(int nibbles, uint64_t *val); + FLATBUFFERS_CHECKED_ERROR Next(); + FLATBUFFERS_CHECKED_ERROR SkipByteOrderMark(); + bool Is(int t) const; + bool IsIdent(const char *id) const; + FLATBUFFERS_CHECKED_ERROR Expect(int t); + std::string TokenToStringId(int t) const; + EnumDef *LookupEnum(const std::string &id); + FLATBUFFERS_CHECKED_ERROR ParseNamespacing(std::string *id, + std::string *last); + FLATBUFFERS_CHECKED_ERROR ParseTypeIdent(Type &type); + FLATBUFFERS_CHECKED_ERROR ParseType(Type &type); + FLATBUFFERS_CHECKED_ERROR AddField(StructDef &struct_def, + const std::string &name, const Type &type, + FieldDef **dest); + FLATBUFFERS_CHECKED_ERROR ParseField(StructDef &struct_def); + FLATBUFFERS_CHECKED_ERROR ParseString(Value &val); + FLATBUFFERS_CHECKED_ERROR ParseComma(); + FLATBUFFERS_CHECKED_ERROR ParseAnyValue(Value &val, FieldDef *field, + size_t parent_fieldn, + const StructDef *parent_struct_def, + uoffset_t count, + bool inside_vector = false); + template + FLATBUFFERS_CHECKED_ERROR ParseTableDelimiters(size_t &fieldn, + const StructDef *struct_def, + F body); + FLATBUFFERS_CHECKED_ERROR ParseTable(const StructDef &struct_def, + 
std::string *value, uoffset_t *ovalue); + void SerializeStruct(const StructDef &struct_def, const Value &val); + template + FLATBUFFERS_CHECKED_ERROR ParseVectorDelimiters(uoffset_t &count, F body); + FLATBUFFERS_CHECKED_ERROR ParseVector(const Type &type, uoffset_t *ovalue, + FieldDef *field, size_t fieldn); + FLATBUFFERS_CHECKED_ERROR ParseNestedFlatbuffer(Value &val, FieldDef *field, + size_t fieldn, + const StructDef *parent_struct_def); + FLATBUFFERS_CHECKED_ERROR ParseMetaData(SymbolTable *attributes); + FLATBUFFERS_CHECKED_ERROR TryTypedValue(const std::string *name, int dtoken, bool check, Value &e, + BaseType req, bool *destmatch); + FLATBUFFERS_CHECKED_ERROR ParseHash(Value &e, FieldDef* field); + FLATBUFFERS_CHECKED_ERROR TokenError(); + FLATBUFFERS_CHECKED_ERROR ParseSingleValue(const std::string *name, Value &e, bool check_now); + FLATBUFFERS_CHECKED_ERROR ParseEnumFromString(const Type &type, std::string *result); + StructDef *LookupCreateStruct(const std::string &name, + bool create_if_new = true, + bool definition = false); + FLATBUFFERS_CHECKED_ERROR ParseEnum(bool is_union, EnumDef **dest); + FLATBUFFERS_CHECKED_ERROR ParseNamespace(); + FLATBUFFERS_CHECKED_ERROR StartStruct(const std::string &name, + StructDef **dest); + FLATBUFFERS_CHECKED_ERROR StartEnum(const std::string &name, + bool is_union, + EnumDef **dest); + FLATBUFFERS_CHECKED_ERROR ParseDecl(); + FLATBUFFERS_CHECKED_ERROR ParseService(); + FLATBUFFERS_CHECKED_ERROR ParseProtoFields(StructDef *struct_def, + bool isextend, bool inside_oneof); + FLATBUFFERS_CHECKED_ERROR ParseProtoOption(); + FLATBUFFERS_CHECKED_ERROR ParseProtoKey(); + FLATBUFFERS_CHECKED_ERROR ParseProtoDecl(); + FLATBUFFERS_CHECKED_ERROR ParseProtoCurliesOrIdent(); + FLATBUFFERS_CHECKED_ERROR ParseTypeFromProtoType(Type *type); + FLATBUFFERS_CHECKED_ERROR SkipAnyJsonValue(); + FLATBUFFERS_CHECKED_ERROR ParseFlexBufferValue(flexbuffers::Builder *builder); + FLATBUFFERS_CHECKED_ERROR StartParseFile(const char *source, + 
const char *source_filename); + FLATBUFFERS_CHECKED_ERROR ParseRoot(const char *_source, + const char **include_paths, + const char *source_filename); + FLATBUFFERS_CHECKED_ERROR DoParse(const char *_source, + const char **include_paths, + const char *source_filename, + const char *include_filename); + FLATBUFFERS_CHECKED_ERROR CheckClash(std::vector &fields, + StructDef *struct_def, + const char *suffix, + BaseType baseType); + + bool SupportsAdvancedUnionFeatures() const; + Namespace *UniqueNamespace(Namespace *ns); + + FLATBUFFERS_CHECKED_ERROR RecurseError(); + template CheckedError Recurse(F f); + + public: + SymbolTable types_; + SymbolTable structs_; + SymbolTable enums_; + SymbolTable services_; + std::vector namespaces_; + Namespace *current_namespace_; + Namespace *empty_namespace_; + std::string error_; // User readable error_ if Parse() == false + + FlatBufferBuilder builder_; // any data contained in the file + StructDef *root_struct_def_; + std::string file_identifier_; + std::string file_extension_; + + std::map included_files_; + std::map> files_included_per_file_; + std::vector native_included_files_; + + std::map known_attributes_; + + IDLOptions opts; + bool uses_flexbuffers_; + + private: + const char *source_; + + std::string file_being_parsed_; + + std::vector> field_stack_; + + int anonymous_counter; + int recurse_protection_counter; +}; + +// Utility functions for multiple generators: + +extern std::string MakeCamel(const std::string &in, bool first = true); + +// Generate text (JSON) from a given FlatBuffer, and a given Parser +// object that has been populated with the corresponding MindSpore.schema. +// If ident_step is 0, no indentation will be generated. Additionally, +// if it is less than 0, no linefeeds will be generated either. +// See idl_gen_text.cpp. +// strict_json adds "quotes" around field names if true. 
+// If the flatbuffer cannot be encoded in JSON (e.g., it contains non-UTF-8 +// byte arrays in String values), returns false. +extern bool GenerateTextFromTable(const Parser &parser, + const void *table, + const std::string &tablename, + std::string *text); +extern bool GenerateText(const Parser &parser, + const void *flatbuffer, + std::string *text); +extern bool GenerateTextFile(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate binary files from a given FlatBuffer, and a given Parser +// object that has been populated with the corresponding MindSpore.schema. +// See idl_gen_general.cpp. +extern bool GenerateBinary(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a C++ header from the definitions in the Parser object. +// See idl_gen_cpp. +extern bool GenerateCPP(const Parser &parser, + const std::string &path, + const std::string &file_name); + +extern bool GenerateDart(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate JavaScript or TypeScript code from the definitions in the Parser object. +// See idl_gen_js. +extern bool GenerateJSTS(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Go files from the definitions in the Parser object. +// See idl_gen_go.cpp. +extern bool GenerateGo(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Php code from the definitions in the Parser object. +// See idl_gen_php. +extern bool GeneratePhp(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Python files from the definitions in the Parser object. +// See idl_gen_python.cpp. +extern bool GeneratePython(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Lobster files from the definitions in the Parser object. +// See idl_gen_lobster.cpp. 
+extern bool GenerateLobster(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Lua files from the definitions in the Parser object. +// See idl_gen_lua.cpp. +extern bool GenerateLua(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Rust files from the definitions in the Parser object. +// See idl_gen_rust.cpp. +extern bool GenerateRust(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Json MindSpore.schema file +// See idl_gen_json_schema.cpp. +extern bool GenerateJsonSchema(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate Java/C#/.. files from the definitions in the Parser object. +// See idl_gen_general.cpp. +extern bool GenerateGeneral(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a MindSpore.schema file from the internal representation, useful after +// parsing a .proto MindSpore.schema. +extern std::string GenerateFBS(const Parser &parser, + const std::string &file_name); +extern bool GenerateFBS(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated JavaScript or TypeScript code. +// See idl_gen_js.cpp. +extern std::string JSTSMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated C++ header. +// See idl_gen_cpp.cpp. +extern std::string CPPMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated Dart code +// see idl_gen_dart.cpp +extern std::string DartMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated Rust code. +// See idl_gen_rust.cpp. 
+extern std::string RustMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated Java/C#/... files. +// See idl_gen_general.cpp. +extern std::string GeneralMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate a make rule for the generated text (JSON) files. +// See idl_gen_text.cpp. +extern std::string TextMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_names); + +// Generate a make rule for the generated binary files. +// See idl_gen_general.cpp. +extern std::string BinaryMakeRule(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate GRPC Cpp interfaces. +// See idl_gen_grpc.cpp. +bool GenerateCppGRPC(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate GRPC Go interfaces. +// See idl_gen_grpc.cpp. +bool GenerateGoGRPC(const Parser &parser, + const std::string &path, + const std::string &file_name); + +// Generate GRPC Java classes. +// See idl_gen_grpc.cpp +bool GenerateJavaGRPC(const Parser &parser, + const std::string &path, + const std::string &file_name); + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_IDL_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/minireflect.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/minireflect.h new file mode 100644 index 0000000000000000000000000000000000000000..5a4d8c02a00715ad8903e67153048f2efbe594cf --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/minireflect.h @@ -0,0 +1,407 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_MINIREFLECT_H_
+#define FLATBUFFERS_MINIREFLECT_H_
+
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/util.h"
+
+namespace flatbuffers {
+
+// Utilities that can be used with the "mini reflection" tables present
+// in generated code with --reflect-types (only types) or --reflect-names
+// (also names).
+// This allows basic reflection functionality such as pretty-printing
+// that does not require the use of the schema parser or loading of binary
+// schema files at runtime (reflection.h).
+
+// For any of the functions below that take `const TypeTable *`, you pass
+// `FooTypeTable()` if the type of the root is `Foo`.
+
+// First, a generic iterator that can be used by multiple algorithms.
+
+struct IterationVisitor {
+  // These mark the scope of a table or struct.
+  virtual void StartSequence() {}
+  virtual void EndSequence() {}
+  // Called for each field regardless of whether it is present or not.
+  // If not present, val == nullptr. set_idx is the index of all set fields.
+  virtual void Field(size_t /*field_idx*/, size_t /*set_idx*/,
+                     ElementaryType /*type*/, bool /*is_vector*/,
+                     const TypeTable * /*type_table*/, const char * /*name*/,
+                     const uint8_t * /*val*/) {}
+  // Called for a value that is actually present, after a field, or as part
+  // of a vector.
+ virtual void UType(uint8_t, const char *) {} + virtual void Bool(bool) {} + virtual void Char(int8_t, const char *) {} + virtual void UChar(uint8_t, const char *) {} + virtual void Short(int16_t, const char *) {} + virtual void UShort(uint16_t, const char *) {} + virtual void Int(int32_t, const char *) {} + virtual void UInt(uint32_t, const char *) {} + virtual void Long(int64_t) {} + virtual void ULong(uint64_t) {} + virtual void Float(float) {} + virtual void Double(double) {} + virtual void String(const String *) {} + virtual void Unknown(const uint8_t *) {} // From a future version. + // These mark the scope of a vector. + virtual void StartVector() {} + virtual void EndVector() {} + virtual void Element(size_t /*i*/, ElementaryType /*type*/, + const TypeTable * /*type_table*/, + const uint8_t * /*val*/) {} + virtual ~IterationVisitor() {} +}; + +inline size_t InlineSize(ElementaryType type, const TypeTable *type_table) { + switch (type) { + case ET_UTYPE: + case ET_BOOL: + case ET_CHAR: + case ET_UCHAR: return 1; + case ET_SHORT: + case ET_USHORT: return 2; + case ET_INT: + case ET_UINT: + case ET_FLOAT: + case ET_STRING: return 4; + case ET_LONG: + case ET_ULONG: + case ET_DOUBLE: return 8; + case ET_SEQUENCE: + switch (type_table->st) { + case ST_TABLE: + case ST_UNION: return 4; + case ST_STRUCT: return static_cast(type_table->values[type_table->num_elems]); + default: FLATBUFFERS_ASSERT(false); return 1; + } + default: FLATBUFFERS_ASSERT(false); return 1; + } +} + +inline int64_t LookupEnum(int64_t enum_val, const int64_t *values, + size_t num_values) { + if (!values) return enum_val; + for (size_t i = 0; i < num_values; i++) { + if (enum_val == values[i]) return static_cast(i); + } + return -1; // Unknown enum value. 
+} + +template const char *EnumName(T tval, const TypeTable *type_table) { + if (!type_table || !type_table->names) return nullptr; + auto i = LookupEnum(static_cast(tval), type_table->values, + type_table->num_elems); + if (i >= 0 && i < static_cast(type_table->num_elems)) { + return type_table->names[i]; + } + return nullptr; +} + +void IterateObject(const uint8_t *obj, const TypeTable *type_table, + IterationVisitor *visitor); + +inline void IterateValue(ElementaryType type, const uint8_t *val, + const TypeTable *type_table, const uint8_t *prev_val, + soffset_t vector_index, IterationVisitor *visitor) { + switch (type) { + case ET_UTYPE: { + auto tval = ReadScalar(val); + visitor->UType(tval, EnumName(tval, type_table)); + break; + } + case ET_BOOL: { + visitor->Bool(ReadScalar(val) != 0); + break; + } + case ET_CHAR: { + auto tval = ReadScalar(val); + visitor->Char(tval, EnumName(tval, type_table)); + break; + } + case ET_UCHAR: { + auto tval = ReadScalar(val); + visitor->UChar(tval, EnumName(tval, type_table)); + break; + } + case ET_SHORT: { + auto tval = ReadScalar(val); + visitor->Short(tval, EnumName(tval, type_table)); + break; + } + case ET_USHORT: { + auto tval = ReadScalar(val); + visitor->UShort(tval, EnumName(tval, type_table)); + break; + } + case ET_INT: { + auto tval = ReadScalar(val); + visitor->Int(tval, EnumName(tval, type_table)); + break; + } + case ET_UINT: { + auto tval = ReadScalar(val); + visitor->UInt(tval, EnumName(tval, type_table)); + break; + } + case ET_LONG: { + visitor->Long(ReadScalar(val)); + break; + } + case ET_ULONG: { + visitor->ULong(ReadScalar(val)); + break; + } + case ET_FLOAT: { + visitor->Float(ReadScalar(val)); + break; + } + case ET_DOUBLE: { + visitor->Double(ReadScalar(val)); + break; + } + case ET_STRING: { + val += ReadScalar(val); + visitor->String(reinterpret_cast(val)); + break; + } + case ET_SEQUENCE: { + switch (type_table->st) { + case ST_TABLE: + val += ReadScalar(val); + IterateObject(val, type_table, 
visitor); + break; + case ST_STRUCT: IterateObject(val, type_table, visitor); break; + case ST_UNION: { + val += ReadScalar(val); + FLATBUFFERS_ASSERT(prev_val); + auto union_type = *prev_val; // Always a uint8_t. + if (vector_index >= 0) { + auto type_vec = reinterpret_cast *>(prev_val); + union_type = type_vec->Get(static_cast(vector_index)); + } + auto type_code_idx = + LookupEnum(union_type, type_table->values, type_table->num_elems); + if (type_code_idx >= 0 && + type_code_idx < static_cast(type_table->num_elems)) { + auto type_code = type_table->type_codes[type_code_idx]; + switch (type_code.base_type) { + case ET_SEQUENCE: { + auto ref = type_table->type_refs[type_code.sequence_ref](); + IterateObject(val, ref, visitor); + break; + } + case ET_STRING: + visitor->String(reinterpret_cast(val)); + break; + default: visitor->Unknown(val); + } + } else { + visitor->Unknown(val); + } + break; + } + case ST_ENUM: FLATBUFFERS_ASSERT(false); break; + } + break; + } + default: { + visitor->Unknown(val); + break; + } + } +} + +inline void IterateObject(const uint8_t *obj, const TypeTable *type_table, + IterationVisitor *visitor) { + visitor->StartSequence(); + const uint8_t *prev_val = nullptr; + size_t set_idx = 0; + for (size_t i = 0; i < type_table->num_elems; i++) { + auto type_code = type_table->type_codes[i]; + auto type = static_cast(type_code.base_type); + auto is_vector = type_code.is_vector != 0; + auto ref_idx = type_code.sequence_ref; + const TypeTable *ref = nullptr; + if (ref_idx >= 0) { ref = type_table->type_refs[ref_idx](); } + auto name = type_table->names ? 
type_table->names[i] : nullptr; + const uint8_t *val = nullptr; + if (type_table->st == ST_TABLE) { + val = reinterpret_cast(obj)->GetAddressOf( + FieldIndexToOffset(static_cast(i))); + } else { + val = obj + type_table->values[i]; + } + visitor->Field(i, set_idx, type, is_vector, ref, name, val); + if (val) { + set_idx++; + if (is_vector) { + val += ReadScalar(val); + auto vec = reinterpret_cast *>(val); + visitor->StartVector(); + auto elem_ptr = vec->Data(); + for (size_t j = 0; j < vec->size(); j++) { + visitor->Element(j, type, ref, elem_ptr); + IterateValue(type, elem_ptr, ref, prev_val, static_cast(j), + visitor); + elem_ptr += InlineSize(type, ref); + } + visitor->EndVector(); + } else { + IterateValue(type, val, ref, prev_val, -1, visitor); + } + } + prev_val = val; + } + visitor->EndSequence(); +} + +inline void IterateFlatBuffer(const uint8_t *buffer, + const TypeTable *type_table, + IterationVisitor *callback) { + IterateObject(GetRoot(buffer), type_table, callback); +} + +// Outputting a Flatbuffer to a string. Tries to conform as close to JSON / +// the output generated by idl_gen_text.cpp. 
+
+// Visitor that renders a FlatBuffer as (near-)JSON text.
+struct ToStringVisitor : public IterationVisitor {
+  std::string s;   // Accumulated output.
+  std::string d;   // Delimiter emitted between fields/elements.
+  bool q;          // Whether to quote field names.
+  std::string in;  // One unit of indentation.
+  size_t indent_level;
+  bool vector_delimited;  // Delimit vector elements like fields?
+  ToStringVisitor(std::string delimiter, bool quotes, std::string indent,
+                  bool vdelimited = true)
+      : d(delimiter),
+        q(quotes),
+        in(indent),
+        indent_level(0),
+        vector_delimited(vdelimited) {}
+  ToStringVisitor(std::string delimiter)
+      : d(delimiter),
+        q(false),
+        in(""),
+        indent_level(0),
+        vector_delimited(true) {}
+
+  void append_indent() {
+    for (size_t i = 0; i < indent_level; i++) { s += in; }
+  }
+
+  void StartSequence() {
+    s += "{";
+    s += d;
+    indent_level++;
+  }
+  void EndSequence() {
+    s += d;
+    indent_level--;
+    append_indent();
+    s += "}";
+  }
+  void Field(size_t /*field_idx*/, size_t set_idx, ElementaryType /*type*/,
+             bool /*is_vector*/, const TypeTable * /*type_table*/,
+             const char *name, const uint8_t *val) {
+    if (!val) return;
+    if (set_idx) {
+      s += ",";
+      s += d;
+    }
+    append_indent();
+    if (name) {
+      if (q) s += "\"";
+      s += name;
+      if (q) s += "\"";
+      s += ": ";
+    }
+  }
+  // Print an enum value by name when resolvable, numerically otherwise.
+  template<typename T> void Named(T x, const char *name) {
+    if (name) {
+      if (q) s += "\"";
+      s += name;
+      if (q) s += "\"";
+    } else {
+      s += NumToString(x);
+    }
+  }
+  void UType(uint8_t x, const char *name) { Named(x, name); }
+  void Bool(bool x) { s += x ? "true" : "false"; }
+  void Char(int8_t x, const char *name) { Named(x, name); }
+  void UChar(uint8_t x, const char *name) { Named(x, name); }
+  void Short(int16_t x, const char *name) { Named(x, name); }
+  void UShort(uint16_t x, const char *name) { Named(x, name); }
+  void Int(int32_t x, const char *name) { Named(x, name); }
+  void UInt(uint32_t x, const char *name) { Named(x, name); }
+  void Long(int64_t x) { s += NumToString(x); }
+  void ULong(uint64_t x) { s += NumToString(x); }
+  void Float(float x) { s += NumToString(x); }
+  void Double(double x) { s += NumToString(x); }
+  void String(const struct String *str) {
+    EscapeString(str->c_str(), str->size(), &s, true, false);
+  }
+  void Unknown(const uint8_t *) { s += "(?)"; }
+  void StartVector() {
+    s += "[";
+    if (vector_delimited) {
+      s += d;
+      indent_level++;
+      append_indent();
+    } else {
+      s += " ";
+    }
+  }
+  void EndVector() {
+    if (vector_delimited) {
+      s += d;
+      indent_level--;
+      append_indent();
+    } else {
+      s += " ";
+    }
+    s += "]";
+  }
+  void Element(size_t i, ElementaryType /*type*/,
+               const TypeTable * /*type_table*/, const uint8_t * /*val*/) {
+    if (i) {
+      s += ",";
+      if (vector_delimited) {
+        s += d;
+        append_indent();
+      } else {
+        s += " ";
+      }
+    }
+  }
+};
+
+inline std::string FlatBufferToString(const uint8_t *buffer,
+                                      const TypeTable *type_table,
+                                      bool multi_line = false,
+                                      bool vector_delimited = true) {
+  ToStringVisitor tostring_visitor(multi_line ?
"\n" : " ", false, "", + vector_delimited); + IterateFlatBuffer(buffer, type_table, &tostring_visitor); + return tostring_visitor.s; +} + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_MINIREFLECT_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection.h new file mode 100644 index 0000000000000000000000000000000000000000..5cc938ecc10bcff74a6d3b85e454aa55a5c48e10 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection.h @@ -0,0 +1,477 @@ +/* + * Copyright 2015 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_REFLECTION_H_ +#define FLATBUFFERS_REFLECTION_H_ + +// This is somewhat of a circular dependency because flatc (and thus this +// file) is needed to generate this header in the first place. +// Should normally not be a problem since it can be generated by the +// previous version of flatc whenever this code needs to change. +// See reflection/generate_code.sh +#include "flatbuffers/reflection_generated.h" + +// Helper functionality for reflection. 
+
+namespace flatbuffers {
+
+// ------------------------- GETTERS -------------------------
+
+inline bool IsScalar(reflection::BaseType t) {
+  return t >= reflection::UType && t <= reflection::Double;
+}
+inline bool IsInteger(reflection::BaseType t) {
+  return t >= reflection::UType && t <= reflection::ULong;
+}
+inline bool IsFloat(reflection::BaseType t) {
+  return t == reflection::Float || t == reflection::Double;
+}
+inline bool IsLong(reflection::BaseType t) {
+  return t == reflection::Long || t == reflection::ULong;
+}
+
+// Size of a basic type, don't use with structs.
+inline size_t GetTypeSize(reflection::BaseType base_type) {
+  // This needs to correspond to the BaseType enum.
+  static size_t sizes[] = { 0, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 4, 8, 4, 4, 4, 4 };
+  return sizes[base_type];
+}
+
+// Same as above, but now correctly returns the size of a struct if
+// the field (or vector element) is a struct.
+inline size_t GetTypeSizeInline(reflection::BaseType base_type, int type_index,
+                                const reflection::Schema &schema) {
+  if (base_type == reflection::Obj &&
+      schema.objects()->Get(type_index)->is_struct()) {
+    return schema.objects()->Get(type_index)->bytesize();
+  } else {
+    return GetTypeSize(base_type);
+  }
+}
+
+// Get the root, regardless of what type it is.
+inline Table *GetAnyRoot(uint8_t *flatbuf) {
+  return GetMutableRoot<Table>(flatbuf);
+}
+inline const Table *GetAnyRoot(const uint8_t *flatbuf) {
+  return GetRoot<Table>(flatbuf);
+}
+
+// Get a field's default, if you know it's an integer, and its exact type.
+template<typename T> T GetFieldDefaultI(const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+  return static_cast<T>(field.default_integer());
+}
+
+// Get a field's default, if you know it's floating point and its exact type.
+template<typename T> T GetFieldDefaultF(const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+  return static_cast<T>(field.default_real());
+}
+
+// Get a field, if you know it's an integer, and its exact type.
+template<typename T>
+T GetFieldI(const Table &table, const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+  return table.GetField<T>(field.offset(),
+                           static_cast<T>(field.default_integer()));
+}
+
+// Get a field, if you know it's floating point and its exact type.
+template<typename T>
+T GetFieldF(const Table &table, const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+  return table.GetField<T>(field.offset(),
+                           static_cast<T>(field.default_real()));
+}
+
+// Get a field, if you know it's a string.
+inline const String *GetFieldS(const Table &table,
+                               const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::String);
+  return table.GetPointer<const String *>(field.offset());
+}
+
+// Get a field, if you know it's a vector.
+template<typename T>
+Vector<T> *GetFieldV(const Table &table, const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Vector &&
+                     sizeof(T) == GetTypeSize(field.type()->element()));
+  return table.GetPointer<Vector<T> *>(field.offset());
+}
+
+// Get a field, if you know it's a vector, generically.
+// To actually access elements, use the return value together with
+// field.type()->element() in any of GetAnyVectorElemI below etc.
+inline VectorOfAny *GetFieldAnyV(const Table &table,
+                                 const reflection::Field &field) {
+  return table.GetPointer<VectorOfAny *>(field.offset());
+}
+
+// Get a field, if you know it's a table.
+inline Table *GetFieldT(const Table &table, const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj ||
+                     field.type()->base_type() == reflection::Union);
+  return table.GetPointer<Table *>(field.offset());
+}
+
+// Get a field, if you know it's a struct.
+inline const Struct *GetFieldStruct(const Table &table,
+                                    const reflection::Field &field) {
+  // TODO: This does NOT check if the field is a table or struct, but we'd need
+  // access to the schema to check the is_struct flag.
+  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
+  return table.GetStruct<const Struct *>(field.offset());
+}
+
+// Get a structure's field, if you know it's a struct.
+inline const Struct *GetFieldStruct(const Struct &structure,
+                                    const reflection::Field &field) {
+  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
+  return structure.GetStruct<const Struct *>(field.offset());
+}
+
+// Raw helper functions used below: get any value in memory as a 64bit int, a
+// double or a string.
+// All scalars get static_cast to an int64_t, strings use strtoull, every other
+// data type returns 0.
+int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data);
+// All scalars static cast to double, strings use strtod, every other data
+// type is 0.0.
+double GetAnyValueF(reflection::BaseType type, const uint8_t *data);
+// All scalars converted using stringstream, strings as-is, and all other
+// data types provide some level of debug-pretty-printing.
+std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
+                         const reflection::Schema *schema, int type_index);
+
+// Get any table field as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyFieldI(const Table &table,
+                            const reflection::Field &field) {
+  auto field_ptr = table.GetAddressOf(field.offset());
+  return field_ptr ? GetAnyValueI(field.type()->base_type(), field_ptr)
+                   : field.default_integer();
+}
+
+// Get any table field as a double, regardless of what type it is.
+inline double GetAnyFieldF(const Table &table, const reflection::Field &field) {
+  auto field_ptr = table.GetAddressOf(field.offset());
+  return field_ptr ? GetAnyValueF(field.type()->base_type(), field_ptr)
+                   : field.default_real();
+}
+
+// Get any table field as a string, regardless of what type it is.
+// You may pass nullptr for the schema if you don't care to have fields that
+// are of table type pretty-printed.
+inline std::string GetAnyFieldS(const Table &table,
+                                const reflection::Field &field,
+                                const reflection::Schema *schema) {
+  auto field_ptr = table.GetAddressOf(field.offset());
+  return field_ptr ? GetAnyValueS(field.type()->base_type(), field_ptr, schema,
+                                  field.type()->index())
+                   : "";
+}
+
+// Get any struct field as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyFieldI(const Struct &st, const reflection::Field &field) {
+  return GetAnyValueI(field.type()->base_type(),
+                      st.GetAddressOf(field.offset()));
+}
+
+// Get any struct field as a double, regardless of what type it is.
+inline double GetAnyFieldF(const Struct &st, const reflection::Field &field) {
+  return GetAnyValueF(field.type()->base_type(),
+                      st.GetAddressOf(field.offset()));
+}
+
+// Get any struct field as a string, regardless of what type it is.
+inline std::string GetAnyFieldS(const Struct &st,
+                                const reflection::Field &field) {
+  return GetAnyValueS(field.type()->base_type(),
+                      st.GetAddressOf(field.offset()), nullptr, -1);
+}
+
+// Get any vector element as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyVectorElemI(const VectorOfAny *vec,
+                                 reflection::BaseType elem_type, size_t i) {
+  return GetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
+}
+
+// Get any vector element as a double, regardless of what type it is.
+inline double GetAnyVectorElemF(const VectorOfAny *vec,
+                                reflection::BaseType elem_type, size_t i) {
+  return GetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
+}
+
+// Get any vector element as a string, regardless of what type it is.
+inline std::string GetAnyVectorElemS(const VectorOfAny *vec,
+                                     reflection::BaseType elem_type, size_t i) {
+  return GetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i,
+                      nullptr, -1);
+}
+
+// Get a vector element that's a table/string/vector from a generic vector.
+// Pass Table/String/VectorOfAny as template parameter.
+// Warning: does no typechecking.
+template<typename T>
+T *GetAnyVectorElemPointer(const VectorOfAny *vec, size_t i) {
+  auto elem_ptr = vec->Data() + sizeof(uoffset_t) * i;
+  return reinterpret_cast<T *>(elem_ptr + ReadScalar<uoffset_t>(elem_ptr));
+}
+
+// Get the inline-address of a vector element. Useful for Structs (pass Struct
+// as template arg), or being able to address a range of scalars in-line.
+// Get elem_size from GetTypeSizeInline().
+// Note: little-endian data on all platforms, use EndianScalar() instead of
+// raw pointer access with scalars).
+template<typename T>
+T *GetAnyVectorElemAddressOf(const VectorOfAny *vec, size_t i,
+                             size_t elem_size) {
+  return reinterpret_cast<T *>(vec->Data() + elem_size * i);
+}
+
+// Similarly, for elements of tables.
+template<typename T>
+T *GetAnyFieldAddressOf(const Table &table, const reflection::Field &field) {
+  return reinterpret_cast<T *>(table.GetAddressOf(field.offset()));
+}
+
+// Similarly, for elements of structs.
+template<typename T>
+T *GetAnyFieldAddressOf(const Struct &st, const reflection::Field &field) {
+  return reinterpret_cast<T *>(st.GetAddressOf(field.offset()));
+}
+
+// ------------------------- SETTERS -------------------------
+
+// Set any scalar field, if you know its exact type.
+template<typename T>
+bool SetField(Table *table, const reflection::Field &field, T val) {
+  reflection::BaseType type = field.type()->base_type();
+  if (!IsScalar(type)) { return false; }
+  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(type));
+  T def;
+  if (IsInteger(type)) {
+    def = GetFieldDefaultI<T>(field);
+  } else {
+    FLATBUFFERS_ASSERT(IsFloat(type));
+    def = GetFieldDefaultF<T>(field);
+  }
+  return table->SetField(field.offset(), val, def);
+}
+
+// Raw helper functions used below: set any value in memory as a 64bit int, a
+// double or a string.
+// These work for all scalar values, but do nothing for other data types.
+// To set a string, see SetString below.
+void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val);
+void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val);
+void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val);
+
+// Set any table field as a 64bit int, regardless of what type it is.
+inline bool SetAnyFieldI(Table *table, const reflection::Field &field,
+                         int64_t val) {
+  auto field_ptr = table->GetAddressOf(field.offset());
+  // An absent field can only "hold" its default value.
+  if (!field_ptr) return val == GetFieldDefaultI<int64_t>(field);
+  SetAnyValueI(field.type()->base_type(), field_ptr, val);
+  return true;
+}
+
+// Set any table field as a double, regardless of what type it is.
+inline bool SetAnyFieldF(Table *table, const reflection::Field &field,
+                         double val) {
+  auto field_ptr = table->GetAddressOf(field.offset());
+  if (!field_ptr) return val == GetFieldDefaultF<double>(field);
+  SetAnyValueF(field.type()->base_type(), field_ptr, val);
+  return true;
+}
+
+// Set any table field as a string, regardless of what type it is.
+inline bool SetAnyFieldS(Table *table, const reflection::Field &field,
+                         const char *val) {
+  auto field_ptr = table->GetAddressOf(field.offset());
+  if (!field_ptr) return false;
+  SetAnyValueS(field.type()->base_type(), field_ptr, val);
+  return true;
+}
+
+// Set any struct field as a 64bit int, regardless of what type it is.
+inline void SetAnyFieldI(Struct *st, const reflection::Field &field,
+                         int64_t val) {
+  SetAnyValueI(field.type()->base_type(), st->GetAddressOf(field.offset()),
+               val);
+}
+
+// Set any struct field as a double, regardless of what type it is.
+inline void SetAnyFieldF(Struct *st, const reflection::Field &field,
+                         double val) {
+  SetAnyValueF(field.type()->base_type(), st->GetAddressOf(field.offset()),
+               val);
+}
+
+// Set any struct field as a string, regardless of what type it is.
+inline void SetAnyFieldS(Struct *st, const reflection::Field &field,
+                         const char *val) {
+  SetAnyValueS(field.type()->base_type(), st->GetAddressOf(field.offset()),
+               val);
+}
+
+// Set any vector element as a 64bit int, regardless of what type it is.
+inline void SetAnyVectorElemI(VectorOfAny *vec, reflection::BaseType elem_type,
+                              size_t i, int64_t val) {
+  SetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// Set any vector element as a double, regardless of what type it is.
+inline void SetAnyVectorElemF(VectorOfAny *vec, reflection::BaseType elem_type,
+                              size_t i, double val) {
+  SetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// Set any vector element as a string, regardless of what type it is.
+inline void SetAnyVectorElemS(VectorOfAny *vec, reflection::BaseType elem_type,
+                              size_t i, const char *val) {
+  SetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// ------------------------- RESIZING SETTERS -------------------------
+
+// "smart" pointer for use with resizing vectors: turns a pointer inside
+// a vector into a relative offset, such that it is not affected by resizes.
+template<typename T> class pointer_inside_vector {
+ public:
+  pointer_inside_vector(T *ptr, std::vector<uint8_t> &vec)
+      : offset_(reinterpret_cast<uint8_t *>(ptr) -
+                reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec))),
+        vec_(vec) {}
+
+  T *operator*() const {
+    return reinterpret_cast<T *>(
+        reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec_)) + offset_);
+  }
+  T *operator->() const { return operator*(); }
+  void operator=(const pointer_inside_vector &piv);
+
+ private:
+  size_t offset_;
+  std::vector<uint8_t> &vec_;
+};
+
+// Helper to create the above easily without specifying template args.
+template<typename T>
+pointer_inside_vector<T> piv(T *ptr, std::vector<uint8_t> &vec) {
+  return pointer_inside_vector<T>(ptr, vec);
+}
+
+inline const char *UnionTypeFieldSuffix() { return "_type"; }
+
+// Helper to figure out the actual table type a union refers to.
+inline const reflection::Object &GetUnionType(
+    const reflection::Schema &schema, const reflection::Object &parent,
+    const reflection::Field &unionfield, const Table &table) {
+  auto enumdef = schema.enums()->Get(unionfield.type()->index());
+  // TODO: this is clumsy and slow, but no other way to find it?
+  auto type_field = parent.fields()->LookupByKey(
+      (unionfield.name()->str() + UnionTypeFieldSuffix()).c_str());
+  FLATBUFFERS_ASSERT(type_field);
+  auto union_type = GetFieldI<uint8_t>(table, *type_field);
+  auto enumval = enumdef->values()->LookupByKey(union_type);
+  return *enumval->object();
+}
+
+// Changes the contents of a string inside a FlatBuffer. FlatBuffer must
+// live inside a std::vector so we can resize the buffer if needed.
+// "str" must live inside "flatbuf" and may be invalidated after this call.
+// If your FlatBuffer's root table is not the schema's root table, you should
+// pass in your root_table type as well.
+void SetString(const reflection::Schema &schema, const std::string &val,
+               const String *str, std::vector<uint8_t> *flatbuf,
+               const reflection::Object *root_table = nullptr);
+
+// Resizes a flatbuffers::Vector inside a FlatBuffer. FlatBuffer must
+// live inside a std::vector so we can resize the buffer if needed.
+// "vec" must live inside "flatbuf" and may be invalidated after this call.
+// If your FlatBuffer's root table is not the schema's root table, you should
+// pass in your root_table type as well.
+uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
+                         const VectorOfAny *vec, uoffset_t num_elems,
+                         uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
+                         const reflection::Object *root_table = nullptr);
+
+template<typename T>
+void ResizeVector(const reflection::Schema &schema, uoffset_t newsize, T val,
+                  const Vector<T> *vec, std::vector<uint8_t> *flatbuf,
+                  const reflection::Object *root_table = nullptr) {
+  auto delta_elem = static_cast<int>(newsize) - static_cast<int>(vec->size());
+  auto newelems = ResizeAnyVector(
+      schema, newsize, reinterpret_cast<const VectorOfAny *>(vec), vec->size(),
+      static_cast<uoffset_t>(sizeof(T)), flatbuf, root_table);
+  // Set new elements to "val".
+  for (int i = 0; i < delta_elem; i++) {
+    auto loc = newelems + i * sizeof(T);
+    auto is_scalar = flatbuffers::is_scalar<T>::value;
+    if (is_scalar) {
+      WriteScalar(loc, val);
+    } else {  // struct
+      *reinterpret_cast<T *>(loc) = val;
+    }
+  }
+}
+
+// Adds any new data (in the form of a new FlatBuffer) to an existing
+// FlatBuffer. This can be used when any of the above methods are not
+// sufficient, in particular for adding new tables and new fields.
+// This is potentially slightly less efficient than a FlatBuffer constructed
+// in one piece, since the new FlatBuffer doesn't share any vtables with the
+// existing one.
+// The return value can now be set using Vector::MutateOffset or SetFieldT
+// below.
+const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
+                             const uint8_t *newbuf, size_t newlen);
+
+inline bool SetFieldT(Table *table, const reflection::Field &field,
+                      const uint8_t *val) {
+  FLATBUFFERS_ASSERT(sizeof(uoffset_t) ==
+                     GetTypeSize(field.type()->base_type()));
+  return table->SetPointer(field.offset(), val);
+}
+
+// ------------------------- COPYING -------------------------
+
+// Generic copying of tables from a FlatBuffer into a FlatBuffer builder.
+// Can be used to do any kind of merging/selecting you may want to do out
+// of existing buffers. Also useful to reconstruct a whole buffer if the
+// above resizing functionality has introduced garbage in a buffer you want
+// to remove.
+// Note: this does not deal with DAGs correctly. If the table passed forms a
+// DAG, the copy will be a tree instead (with duplicates). Strings can be
+// shared however, by passing true for use_string_pooling.
+
+Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
+                                const reflection::Schema &schema,
+                                const reflection::Object &objectdef,
+                                const Table &table,
+                                bool use_string_pooling = false);
+
+// Verifies the provided flatbuffer using reflection.
+// root should point to the root type for this flatbuffer.
+// buf should point to the start of flatbuffer data.
+// length specifies the size of the flatbuffer data.
+bool Verify(const reflection::Schema &schema, const reflection::Object &root, + const uint8_t *buf, size_t length); + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_REFLECTION_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection_generated.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection_generated.h new file mode 100644 index 0000000000000000000000000000000000000000..869a9f3f22346b5ab326f13f6674595b1c4aa317 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/reflection_generated.h @@ -0,0 +1,1182 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_ +#define FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_ + +#include "flatbuffers/flatbuffers.h" + +namespace reflection { + +struct Type; + +struct KeyValue; + +struct EnumVal; + +struct Enum; + +struct Field; + +struct Object; + +struct RPCCall; + +struct Service; + +struct Schema; + +enum BaseType { + None = 0, + UType = 1, + Bool = 2, + Byte = 3, + UByte = 4, + Short = 5, + UShort = 6, + Int = 7, + UInt = 8, + Long = 9, + ULong = 10, + Float = 11, + Double = 12, + String = 13, + Vector = 14, + Obj = 15, + Union = 16 +}; + +inline const BaseType (&EnumValuesBaseType())[17] { + static const BaseType values[] = { + None, + UType, + Bool, + Byte, + UByte, + Short, + UShort, + Int, + UInt, + Long, + ULong, + Float, + Double, + String, + Vector, + Obj, + Union + }; + return values; +} + +inline const char * const *EnumNamesBaseType() { + static const char * const names[] = { + "None", + "UType", + "Bool", + "Byte", + "UByte", + "Short", + "UShort", + "Int", + "UInt", + "Long", + "ULong", + "Float", + "Double", + "String", + "Vector", + "Obj", + "Union", + nullptr + }; + return names; +} + +inline const char *EnumNameBaseType(BaseType e) { + if (e < None || e > Union) return ""; + const 
size_t index = static_cast(e); + return EnumNamesBaseType()[index]; +} + +struct Type FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BASE_TYPE = 4, + VT_ELEMENT = 6, + VT_INDEX = 8 + }; + BaseType base_type() const { + return static_cast(GetField(VT_BASE_TYPE, 0)); + } + BaseType element() const { + return static_cast(GetField(VT_ELEMENT, 0)); + } + int32_t index() const { + return GetField(VT_INDEX, -1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BASE_TYPE) && + VerifyField(verifier, VT_ELEMENT) && + VerifyField(verifier, VT_INDEX) && + verifier.EndTable(); + } +}; + +struct TypeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_base_type(BaseType base_type) { + fbb_.AddElement(Type::VT_BASE_TYPE, static_cast(base_type), 0); + } + void add_element(BaseType element) { + fbb_.AddElement(Type::VT_ELEMENT, static_cast(element), 0); + } + void add_index(int32_t index) { + fbb_.AddElement(Type::VT_INDEX, index, -1); + } + explicit TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TypeBuilder &operator=(const TypeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateType( + flatbuffers::FlatBufferBuilder &_fbb, + BaseType base_type = None, + BaseType element = None, + int32_t index = -1) { + TypeBuilder builder_(_fbb); + builder_.add_index(index); + builder_.add_element(element); + builder_.add_base_type(base_type); + return builder_.Finish(); +} + +struct KeyValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEY = 4, + VT_VALUE = 6 + }; + const flatbuffers::String *key() const { + return GetPointer(VT_KEY); + } 
+ bool KeyCompareLessThan(const KeyValue *o) const { + return *key() < *o->key(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(key()->c_str(), val); + } + const flatbuffers::String *value() const { + return GetPointer(VT_VALUE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_KEY) && + verifier.VerifyString(key()) && + VerifyOffset(verifier, VT_VALUE) && + verifier.VerifyString(value()) && + verifier.EndTable(); + } +}; + +struct KeyValueBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_key(flatbuffers::Offset key) { + fbb_.AddOffset(KeyValue::VT_KEY, key); + } + void add_value(flatbuffers::Offset value) { + fbb_.AddOffset(KeyValue::VT_VALUE, value); + } + explicit KeyValueBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + KeyValueBuilder &operator=(const KeyValueBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, KeyValue::VT_KEY); + return o; + } +}; + +inline flatbuffers::Offset CreateKeyValue( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset key = 0, + flatbuffers::Offset value = 0) { + KeyValueBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_key(key); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateKeyValueDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *key = nullptr, + const char *value = nullptr) { + auto key__ = key ? _fbb.CreateString(key) : 0; + auto value__ = value ? 
_fbb.CreateString(value) : 0; + return reflection::CreateKeyValue( + _fbb, + key__, + value__); +} + +struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_VALUE = 6, + VT_OBJECT = 8, + VT_UNION_TYPE = 10, + VT_DOCUMENTATION = 12 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + int64_t value() const { + return GetField(VT_VALUE, 0); + } + bool KeyCompareLessThan(const EnumVal *o) const { + return value() < o->value(); + } + int KeyCompareWithValue(int64_t val) const { + return static_cast(value() > val) - static_cast(value() < val); + } + const Object *object() const { + return GetPointer(VT_OBJECT); + } + const Type *union_type() const { + return GetPointer(VT_UNION_TYPE); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_VALUE) && + VerifyOffset(verifier, VT_OBJECT) && + verifier.VerifyTable(object()) && + VerifyOffset(verifier, VT_UNION_TYPE) && + verifier.VerifyTable(union_type()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct EnumValBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(EnumVal::VT_NAME, name); + } + void add_value(int64_t value) { + fbb_.AddElement(EnumVal::VT_VALUE, value, 0); + } + void add_object(flatbuffers::Offset object) { + fbb_.AddOffset(EnumVal::VT_OBJECT, object); + } + void add_union_type(flatbuffers::Offset union_type) { + fbb_.AddOffset(EnumVal::VT_UNION_TYPE, union_type); + } + void 
add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(EnumVal::VT_DOCUMENTATION, documentation); + } + explicit EnumValBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EnumValBuilder &operator=(const EnumValBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, EnumVal::VT_NAME); + return o; + } +}; + +inline flatbuffers::Offset CreateEnumVal( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + int64_t value = 0, + flatbuffers::Offset object = 0, + flatbuffers::Offset union_type = 0, + flatbuffers::Offset>> documentation = 0) { + EnumValBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_documentation(documentation); + builder_.add_union_type(union_type); + builder_.add_object(object); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateEnumValDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + int64_t value = 0, + flatbuffers::Offset object = 0, + flatbuffers::Offset union_type = 0, + const std::vector> *documentation = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto documentation__ = documentation ? 
_fbb.CreateVector>(*documentation) : 0; + return reflection::CreateEnumVal( + _fbb, + name__, + value, + object, + union_type, + documentation__); +} + +struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_VALUES = 6, + VT_IS_UNION = 8, + VT_UNDERLYING_TYPE = 10, + VT_ATTRIBUTES = 12, + VT_DOCUMENTATION = 14 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool KeyCompareLessThan(const Enum *o) const { + return *name() < *o->name(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(name()->c_str(), val); + } + const flatbuffers::Vector> *values() const { + return GetPointer> *>(VT_VALUES); + } + bool is_union() const { + return GetField(VT_IS_UNION, 0) != 0; + } + const Type *underlying_type() const { + return GetPointer(VT_UNDERLYING_TYPE); + } + const flatbuffers::Vector> *attributes() const { + return GetPointer> *>(VT_ATTRIBUTES); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffsetRequired(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + verifier.VerifyVectorOfTables(values()) && + VerifyField(verifier, VT_IS_UNION) && + VerifyOffsetRequired(verifier, VT_UNDERLYING_TYPE) && + verifier.VerifyTable(underlying_type()) && + VerifyOffset(verifier, VT_ATTRIBUTES) && + verifier.VerifyVector(attributes()) && + verifier.VerifyVectorOfTables(attributes()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct EnumBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset 
name) { + fbb_.AddOffset(Enum::VT_NAME, name); + } + void add_values(flatbuffers::Offset>> values) { + fbb_.AddOffset(Enum::VT_VALUES, values); + } + void add_is_union(bool is_union) { + fbb_.AddElement(Enum::VT_IS_UNION, static_cast(is_union), 0); + } + void add_underlying_type(flatbuffers::Offset underlying_type) { + fbb_.AddOffset(Enum::VT_UNDERLYING_TYPE, underlying_type); + } + void add_attributes(flatbuffers::Offset>> attributes) { + fbb_.AddOffset(Enum::VT_ATTRIBUTES, attributes); + } + void add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(Enum::VT_DOCUMENTATION, documentation); + } + explicit EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EnumBuilder &operator=(const EnumBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, Enum::VT_NAME); + fbb_.Required(o, Enum::VT_VALUES); + fbb_.Required(o, Enum::VT_UNDERLYING_TYPE); + return o; + } +}; + +inline flatbuffers::Offset CreateEnum( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset>> values = 0, + bool is_union = false, + flatbuffers::Offset underlying_type = 0, + flatbuffers::Offset>> attributes = 0, + flatbuffers::Offset>> documentation = 0) { + EnumBuilder builder_(_fbb); + builder_.add_documentation(documentation); + builder_.add_attributes(attributes); + builder_.add_underlying_type(underlying_type); + builder_.add_values(values); + builder_.add_name(name); + builder_.add_is_union(is_union); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateEnumDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const std::vector> *values = nullptr, + bool is_union = false, + flatbuffers::Offset underlying_type = 0, + const std::vector> *attributes = nullptr, + const std::vector> *documentation = nullptr) { + auto name__ = name ? 
_fbb.CreateString(name) : 0; + auto values__ = values ? _fbb.CreateVector>(*values) : 0; + auto attributes__ = attributes ? _fbb.CreateVector>(*attributes) : 0; + auto documentation__ = documentation ? _fbb.CreateVector>(*documentation) : 0; + return reflection::CreateEnum( + _fbb, + name__, + values__, + is_union, + underlying_type, + attributes__, + documentation__); +} + +struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_TYPE = 6, + VT_ID = 8, + VT_OFFSET = 10, + VT_DEFAULT_INTEGER = 12, + VT_DEFAULT_REAL = 14, + VT_DEPRECATED = 16, + VT_REQUIRED = 18, + VT_KEY = 20, + VT_ATTRIBUTES = 22, + VT_DOCUMENTATION = 24 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool KeyCompareLessThan(const Field *o) const { + return *name() < *o->name(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(name()->c_str(), val); + } + const Type *type() const { + return GetPointer(VT_TYPE); + } + uint16_t id() const { + return GetField(VT_ID, 0); + } + uint16_t offset() const { + return GetField(VT_OFFSET, 0); + } + int64_t default_integer() const { + return GetField(VT_DEFAULT_INTEGER, 0); + } + double default_real() const { + return GetField(VT_DEFAULT_REAL, 0.0); + } + bool deprecated() const { + return GetField(VT_DEPRECATED, 0) != 0; + } + bool required() const { + return GetField(VT_REQUIRED, 0) != 0; + } + bool key() const { + return GetField(VT_KEY, 0) != 0; + } + const flatbuffers::Vector> *attributes() const { + return GetPointer> *>(VT_ATTRIBUTES); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffsetRequired(verifier, VT_TYPE) && + verifier.VerifyTable(type()) && + 
VerifyField(verifier, VT_ID) && + VerifyField(verifier, VT_OFFSET) && + VerifyField(verifier, VT_DEFAULT_INTEGER) && + VerifyField(verifier, VT_DEFAULT_REAL) && + VerifyField(verifier, VT_DEPRECATED) && + VerifyField(verifier, VT_REQUIRED) && + VerifyField(verifier, VT_KEY) && + VerifyOffset(verifier, VT_ATTRIBUTES) && + verifier.VerifyVector(attributes()) && + verifier.VerifyVectorOfTables(attributes()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct FieldBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Field::VT_NAME, name); + } + void add_type(flatbuffers::Offset type) { + fbb_.AddOffset(Field::VT_TYPE, type); + } + void add_id(uint16_t id) { + fbb_.AddElement(Field::VT_ID, id, 0); + } + void add_offset(uint16_t offset) { + fbb_.AddElement(Field::VT_OFFSET, offset, 0); + } + void add_default_integer(int64_t default_integer) { + fbb_.AddElement(Field::VT_DEFAULT_INTEGER, default_integer, 0); + } + void add_default_real(double default_real) { + fbb_.AddElement(Field::VT_DEFAULT_REAL, default_real, 0.0); + } + void add_deprecated(bool deprecated) { + fbb_.AddElement(Field::VT_DEPRECATED, static_cast(deprecated), 0); + } + void add_required(bool required) { + fbb_.AddElement(Field::VT_REQUIRED, static_cast(required), 0); + } + void add_key(bool key) { + fbb_.AddElement(Field::VT_KEY, static_cast(key), 0); + } + void add_attributes(flatbuffers::Offset>> attributes) { + fbb_.AddOffset(Field::VT_ATTRIBUTES, attributes); + } + void add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(Field::VT_DOCUMENTATION, documentation); + } + explicit FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FieldBuilder &operator=(const FieldBuilder &); + flatbuffers::Offset Finish() { 
+ const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, Field::VT_NAME); + fbb_.Required(o, Field::VT_TYPE); + return o; + } +}; + +inline flatbuffers::Offset CreateField( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset type = 0, + uint16_t id = 0, + uint16_t offset = 0, + int64_t default_integer = 0, + double default_real = 0.0, + bool deprecated = false, + bool required = false, + bool key = false, + flatbuffers::Offset>> attributes = 0, + flatbuffers::Offset>> documentation = 0) { + FieldBuilder builder_(_fbb); + builder_.add_default_real(default_real); + builder_.add_default_integer(default_integer); + builder_.add_documentation(documentation); + builder_.add_attributes(attributes); + builder_.add_type(type); + builder_.add_name(name); + builder_.add_offset(offset); + builder_.add_id(id); + builder_.add_key(key); + builder_.add_required(required); + builder_.add_deprecated(deprecated); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateFieldDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + flatbuffers::Offset type = 0, + uint16_t id = 0, + uint16_t offset = 0, + int64_t default_integer = 0, + double default_real = 0.0, + bool deprecated = false, + bool required = false, + bool key = false, + const std::vector> *attributes = nullptr, + const std::vector> *documentation = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto attributes__ = attributes ? _fbb.CreateVector>(*attributes) : 0; + auto documentation__ = documentation ? 
_fbb.CreateVector>(*documentation) : 0; + return reflection::CreateField( + _fbb, + name__, + type, + id, + offset, + default_integer, + default_real, + deprecated, + required, + key, + attributes__, + documentation__); +} + +struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_FIELDS = 6, + VT_IS_STRUCT = 8, + VT_MINALIGN = 10, + VT_BYTESIZE = 12, + VT_ATTRIBUTES = 14, + VT_DOCUMENTATION = 16 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool KeyCompareLessThan(const Object *o) const { + return *name() < *o->name(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(name()->c_str(), val); + } + const flatbuffers::Vector> *fields() const { + return GetPointer> *>(VT_FIELDS); + } + bool is_struct() const { + return GetField(VT_IS_STRUCT, 0) != 0; + } + int32_t minalign() const { + return GetField(VT_MINALIGN, 0); + } + int32_t bytesize() const { + return GetField(VT_BYTESIZE, 0); + } + const flatbuffers::Vector> *attributes() const { + return GetPointer> *>(VT_ATTRIBUTES); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffsetRequired(verifier, VT_FIELDS) && + verifier.VerifyVector(fields()) && + verifier.VerifyVectorOfTables(fields()) && + VerifyField(verifier, VT_IS_STRUCT) && + VerifyField(verifier, VT_MINALIGN) && + VerifyField(verifier, VT_BYTESIZE) && + VerifyOffset(verifier, VT_ATTRIBUTES) && + verifier.VerifyVector(attributes()) && + verifier.VerifyVectorOfTables(attributes()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct 
ObjectBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Object::VT_NAME, name); + } + void add_fields(flatbuffers::Offset>> fields) { + fbb_.AddOffset(Object::VT_FIELDS, fields); + } + void add_is_struct(bool is_struct) { + fbb_.AddElement(Object::VT_IS_STRUCT, static_cast(is_struct), 0); + } + void add_minalign(int32_t minalign) { + fbb_.AddElement(Object::VT_MINALIGN, minalign, 0); + } + void add_bytesize(int32_t bytesize) { + fbb_.AddElement(Object::VT_BYTESIZE, bytesize, 0); + } + void add_attributes(flatbuffers::Offset>> attributes) { + fbb_.AddOffset(Object::VT_ATTRIBUTES, attributes); + } + void add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(Object::VT_DOCUMENTATION, documentation); + } + explicit ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ObjectBuilder &operator=(const ObjectBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, Object::VT_NAME); + fbb_.Required(o, Object::VT_FIELDS); + return o; + } +}; + +inline flatbuffers::Offset CreateObject( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset>> fields = 0, + bool is_struct = false, + int32_t minalign = 0, + int32_t bytesize = 0, + flatbuffers::Offset>> attributes = 0, + flatbuffers::Offset>> documentation = 0) { + ObjectBuilder builder_(_fbb); + builder_.add_documentation(documentation); + builder_.add_attributes(attributes); + builder_.add_bytesize(bytesize); + builder_.add_minalign(minalign); + builder_.add_fields(fields); + builder_.add_name(name); + builder_.add_is_struct(is_struct); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateObjectDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const std::vector> *fields = nullptr, + bool is_struct = 
false, + int32_t minalign = 0, + int32_t bytesize = 0, + const std::vector> *attributes = nullptr, + const std::vector> *documentation = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto fields__ = fields ? _fbb.CreateVector>(*fields) : 0; + auto attributes__ = attributes ? _fbb.CreateVector>(*attributes) : 0; + auto documentation__ = documentation ? _fbb.CreateVector>(*documentation) : 0; + return reflection::CreateObject( + _fbb, + name__, + fields__, + is_struct, + minalign, + bytesize, + attributes__, + documentation__); +} + +struct RPCCall FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_REQUEST = 6, + VT_RESPONSE = 8, + VT_ATTRIBUTES = 10, + VT_DOCUMENTATION = 12 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool KeyCompareLessThan(const RPCCall *o) const { + return *name() < *o->name(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(name()->c_str(), val); + } + const Object *request() const { + return GetPointer(VT_REQUEST); + } + const Object *response() const { + return GetPointer(VT_RESPONSE); + } + const flatbuffers::Vector> *attributes() const { + return GetPointer> *>(VT_ATTRIBUTES); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffsetRequired(verifier, VT_REQUEST) && + verifier.VerifyTable(request()) && + VerifyOffsetRequired(verifier, VT_RESPONSE) && + verifier.VerifyTable(response()) && + VerifyOffset(verifier, VT_ATTRIBUTES) && + verifier.VerifyVector(attributes()) && + verifier.VerifyVectorOfTables(attributes()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + 
verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct RPCCallBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(RPCCall::VT_NAME, name); + } + void add_request(flatbuffers::Offset request) { + fbb_.AddOffset(RPCCall::VT_REQUEST, request); + } + void add_response(flatbuffers::Offset response) { + fbb_.AddOffset(RPCCall::VT_RESPONSE, response); + } + void add_attributes(flatbuffers::Offset>> attributes) { + fbb_.AddOffset(RPCCall::VT_ATTRIBUTES, attributes); + } + void add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(RPCCall::VT_DOCUMENTATION, documentation); + } + explicit RPCCallBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RPCCallBuilder &operator=(const RPCCallBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, RPCCall::VT_NAME); + fbb_.Required(o, RPCCall::VT_REQUEST); + fbb_.Required(o, RPCCall::VT_RESPONSE); + return o; + } +}; + +inline flatbuffers::Offset CreateRPCCall( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset request = 0, + flatbuffers::Offset response = 0, + flatbuffers::Offset>> attributes = 0, + flatbuffers::Offset>> documentation = 0) { + RPCCallBuilder builder_(_fbb); + builder_.add_documentation(documentation); + builder_.add_attributes(attributes); + builder_.add_response(response); + builder_.add_request(request); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateRPCCallDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + flatbuffers::Offset request = 0, + flatbuffers::Offset response = 0, + const std::vector> *attributes = nullptr, + const std::vector> *documentation = nullptr) { + auto name__ = name ? 
_fbb.CreateString(name) : 0; + auto attributes__ = attributes ? _fbb.CreateVector>(*attributes) : 0; + auto documentation__ = documentation ? _fbb.CreateVector>(*documentation) : 0; + return reflection::CreateRPCCall( + _fbb, + name__, + request, + response, + attributes__, + documentation__); +} + +struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_CALLS = 6, + VT_ATTRIBUTES = 8, + VT_DOCUMENTATION = 10 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool KeyCompareLessThan(const Service *o) const { + return *name() < *o->name(); + } + int KeyCompareWithValue(const char *val) const { + return strcmp(name()->c_str(), val); + } + const flatbuffers::Vector> *calls() const { + return GetPointer> *>(VT_CALLS); + } + const flatbuffers::Vector> *attributes() const { + return GetPointer> *>(VT_ATTRIBUTES); + } + const flatbuffers::Vector> *documentation() const { + return GetPointer> *>(VT_DOCUMENTATION); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_CALLS) && + verifier.VerifyVector(calls()) && + verifier.VerifyVectorOfTables(calls()) && + VerifyOffset(verifier, VT_ATTRIBUTES) && + verifier.VerifyVector(attributes()) && + verifier.VerifyVectorOfTables(attributes()) && + VerifyOffset(verifier, VT_DOCUMENTATION) && + verifier.VerifyVector(documentation()) && + verifier.VerifyVectorOfStrings(documentation()) && + verifier.EndTable(); + } +}; + +struct ServiceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Service::VT_NAME, name); + } + void add_calls(flatbuffers::Offset>> calls) { + fbb_.AddOffset(Service::VT_CALLS, calls); + } + void add_attributes(flatbuffers::Offset>> 
attributes) { + fbb_.AddOffset(Service::VT_ATTRIBUTES, attributes); + } + void add_documentation(flatbuffers::Offset>> documentation) { + fbb_.AddOffset(Service::VT_DOCUMENTATION, documentation); + } + explicit ServiceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ServiceBuilder &operator=(const ServiceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, Service::VT_NAME); + return o; + } +}; + +inline flatbuffers::Offset CreateService( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + flatbuffers::Offset>> calls = 0, + flatbuffers::Offset>> attributes = 0, + flatbuffers::Offset>> documentation = 0) { + ServiceBuilder builder_(_fbb); + builder_.add_documentation(documentation); + builder_.add_attributes(attributes); + builder_.add_calls(calls); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateServiceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const std::vector> *calls = nullptr, + const std::vector> *attributes = nullptr, + const std::vector> *documentation = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto calls__ = calls ? _fbb.CreateVector>(*calls) : 0; + auto attributes__ = attributes ? _fbb.CreateVector>(*attributes) : 0; + auto documentation__ = documentation ? 
_fbb.CreateVector>(*documentation) : 0; + return reflection::CreateService( + _fbb, + name__, + calls__, + attributes__, + documentation__); +} + +struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OBJECTS = 4, + VT_ENUMS = 6, + VT_FILE_IDENT = 8, + VT_FILE_EXT = 10, + VT_ROOT_TABLE = 12, + VT_SERVICES = 14 + }; + const flatbuffers::Vector> *objects() const { + return GetPointer> *>(VT_OBJECTS); + } + const flatbuffers::Vector> *enums() const { + return GetPointer> *>(VT_ENUMS); + } + const flatbuffers::String *file_ident() const { + return GetPointer(VT_FILE_IDENT); + } + const flatbuffers::String *file_ext() const { + return GetPointer(VT_FILE_EXT); + } + const Object *root_table() const { + return GetPointer(VT_ROOT_TABLE); + } + const flatbuffers::Vector> *services() const { + return GetPointer> *>(VT_SERVICES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffsetRequired(verifier, VT_OBJECTS) && + verifier.VerifyVector(objects()) && + verifier.VerifyVectorOfTables(objects()) && + VerifyOffsetRequired(verifier, VT_ENUMS) && + verifier.VerifyVector(enums()) && + verifier.VerifyVectorOfTables(enums()) && + VerifyOffset(verifier, VT_FILE_IDENT) && + verifier.VerifyString(file_ident()) && + VerifyOffset(verifier, VT_FILE_EXT) && + verifier.VerifyString(file_ext()) && + VerifyOffset(verifier, VT_ROOT_TABLE) && + verifier.VerifyTable(root_table()) && + VerifyOffset(verifier, VT_SERVICES) && + verifier.VerifyVector(services()) && + verifier.VerifyVectorOfTables(services()) && + verifier.EndTable(); + } +}; + +struct SchemaBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_objects(flatbuffers::Offset>> objects) { + fbb_.AddOffset(Schema::VT_OBJECTS, objects); + } + void add_enums(flatbuffers::Offset>> enums) { + fbb_.AddOffset(Schema::VT_ENUMS, enums); + } + void 
add_file_ident(flatbuffers::Offset file_ident) { + fbb_.AddOffset(Schema::VT_FILE_IDENT, file_ident); + } + void add_file_ext(flatbuffers::Offset file_ext) { + fbb_.AddOffset(Schema::VT_FILE_EXT, file_ext); + } + void add_root_table(flatbuffers::Offset root_table) { + fbb_.AddOffset(Schema::VT_ROOT_TABLE, root_table); + } + void add_services(flatbuffers::Offset>> services) { + fbb_.AddOffset(Schema::VT_SERVICES, services); + } + explicit SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SchemaBuilder &operator=(const SchemaBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + fbb_.Required(o, Schema::VT_OBJECTS); + fbb_.Required(o, Schema::VT_ENUMS); + return o; + } +}; + +inline flatbuffers::Offset CreateSchema( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> objects = 0, + flatbuffers::Offset>> enums = 0, + flatbuffers::Offset file_ident = 0, + flatbuffers::Offset file_ext = 0, + flatbuffers::Offset root_table = 0, + flatbuffers::Offset>> services = 0) { + SchemaBuilder builder_(_fbb); + builder_.add_services(services); + builder_.add_root_table(root_table); + builder_.add_file_ext(file_ext); + builder_.add_file_ident(file_ident); + builder_.add_enums(enums); + builder_.add_objects(objects); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSchemaDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *objects = nullptr, + const std::vector> *enums = nullptr, + const char *file_ident = nullptr, + const char *file_ext = nullptr, + flatbuffers::Offset root_table = 0, + const std::vector> *services = nullptr) { + auto objects__ = objects ? _fbb.CreateVector>(*objects) : 0; + auto enums__ = enums ? _fbb.CreateVector>(*enums) : 0; + auto file_ident__ = file_ident ? _fbb.CreateString(file_ident) : 0; + auto file_ext__ = file_ext ? _fbb.CreateString(file_ext) : 0; + auto services__ = services ? 
_fbb.CreateVector>(*services) : 0; + return reflection::CreateSchema( + _fbb, + objects__, + enums__, + file_ident__, + file_ext__, + root_table, + services__); +} + +inline const reflection::Schema *GetSchema(const void *buf) { + return flatbuffers::GetRoot(buf); +} + +inline const reflection::Schema *GetSizePrefixedSchema(const void *buf) { + return flatbuffers::GetSizePrefixedRoot(buf); +} + +inline const char *SchemaIdentifier() { + return "BFBS"; +} + +inline bool SchemaBufferHasIdentifier(const void *buf) { + return flatbuffers::BufferHasIdentifier( + buf, SchemaIdentifier()); +} + +inline bool VerifySchemaBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(SchemaIdentifier()); +} + +inline bool VerifySizePrefixedSchemaBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(SchemaIdentifier()); +} + +inline const char *SchemaExtension() { + return "bfbs"; +} + +inline void FinishSchemaBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.Finish(root, SchemaIdentifier()); +} + +inline void FinishSizePrefixedSchemaBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root, SchemaIdentifier()); +} + +} // namespace reflection + +#endif // FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/registry.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/registry.h new file mode 100644 index 0000000000000000000000000000000000000000..d390796fa5019fe8acc9e902a18fa39c3a07e62c --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/registry.h @@ -0,0 +1,127 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_REGISTRY_H_ +#define FLATBUFFERS_REGISTRY_H_ + +#include "idl.h" + +namespace flatbuffers { + +// Convenience class to easily parse or generate text for arbitrary FlatBuffers. +// Simply pre-populate it with all MindSpore.schema filenames that may be in use, and +// This class will look them up using the file_identifier declared in the +// MindSpore.schema. +class Registry { + public: + // Call this for all schemas that may be in use. The identifier has + // a function in the generated code, e.g. MonsterIdentifier(). + void Register(const char *file_identifier, const char *schema_path) { + Schema schema; + schema.path_ = schema_path; + schemas_[file_identifier] = schema; + } + + // Generate text from an arbitrary FlatBuffer by looking up its + // file_identifier in the registry. + bool FlatBufferToText(const uint8_t *flatbuf, size_t len, std::string *dest) { + // Get the identifier out of the buffer. + // If the buffer is truncated, exit. + if (len < sizeof(uoffset_t) + FlatBufferBuilder::kFileIdentifierLength) { + lasterror_ = "buffer truncated"; + return false; + } + std::string ident( + reinterpret_cast(flatbuf) + sizeof(uoffset_t), + FlatBufferBuilder::kFileIdentifierLength); + // Load and parse the MindSpore.schema. + Parser parser; + if (!LoadSchema(ident, &parser)) return false; + // Now we're ready to generate text. 
+ if (!GenerateText(parser, flatbuf, dest)) { + lasterror_ = "unable to generate text for FlatBuffer binary"; + return false; + } + return true; + } + + // Converts a binary buffer to text using one of the schemas in the registry, + // use the file_identifier to indicate which. + // If DetachedBuffer::data() is null then parsing failed. + DetachedBuffer TextToFlatBuffer(const char *text, + const char *file_identifier) { + // Load and parse the MindSpore.schema. + Parser parser; + if (!LoadSchema(file_identifier, &parser)) return DetachedBuffer(); + // Parse the text. + if (!parser.Parse(text)) { + lasterror_ = parser.error_; + return DetachedBuffer(); + } + // We have a valid FlatBuffer. Detach it from the builder and return. + return parser.builder_.Release(); + } + + // Modify any parsing / output options used by the other functions. + void SetOptions(const IDLOptions &opts) { opts_ = opts; } + + // If schemas used contain include statements, call this function for every + // directory the parser should search them for. + void AddIncludeDirectory(const char *path) { include_paths_.push_back(path); } + + // Returns a human readable error if any of the above functions fail. + const std::string &GetLastError() { return lasterror_; } + + private: + bool LoadSchema(const std::string &ident, Parser *parser) { + // Find the MindSpore.schema, if not, exit. + auto it = schemas_.find(ident); + if (it == schemas_.end()) { + // Don't attach the identifier, since it may not be human readable. + lasterror_ = "identifier for this buffer not in the registry"; + return false; + } + auto &schema = it->second; + // Load the MindSpore.schema from disk. If not, exit. + std::string schematext; + if (!LoadFile(schema.path_.c_str(), false, &schematext)) { + lasterror_ = "could not load MindSpore.schema: " + schema.path_; + return false; + } + // Parse MindSpore.schema. 
+ parser->opts = opts_; + if (!parser->Parse(schematext.c_str(), vector_data(include_paths_), + schema.path_.c_str())) { + lasterror_ = parser->error_; + return false; + } + return true; + } + + struct Schema { + std::string path_; + // TODO(wvo) optionally cache MindSpore.schema file or parsed MindSpore.schema here. + }; + + std::string lasterror_; + IDLOptions opts_; + std::vector include_paths_; + std::map schemas_; +}; + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_REGISTRY_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/stl_emulation.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/stl_emulation.h new file mode 100644 index 0000000000000000000000000000000000000000..6e4acab4075818a47a3318f0cbf7263d781cd03c --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/stl_emulation.h @@ -0,0 +1,275 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef FLATBUFFERS_STL_EMULATION_H_ +#define FLATBUFFERS_STL_EMULATION_H_ + +// clang-format off + +#include +#include +#include +#include +#include + +#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL) + #define FLATBUFFERS_CPP98_STL +#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL) + +#if defined(FLATBUFFERS_CPP98_STL) + #include +#endif // defined(FLATBUFFERS_CPP98_STL) + +// Check if we can use template aliases +// Not possible if Microsoft Compiler before 2012 +// Possible is the language feature __cpp_alias_templates is defined well +// Or possible if the C++ std is C+11 or newer +#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \ + || (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \ + || (defined(__cplusplus) && __cplusplus >= 201103L) + #define FLATBUFFERS_TEMPLATES_ALIASES +#endif + +// This header provides backwards compatibility for C++98 STLs like stlport. +namespace flatbuffers { + +// Retrieve ::back() from a string in a way that is compatible with pre C++11 +// STLs (e.g stlport). +inline char& string_back(std::string &value) { + return value[value.length() - 1]; +} + +inline char string_back(const std::string &value) { + return value[value.length() - 1]; +} + +// Helper method that retrieves ::data() from a vector in a way that is +// compatible with pre C++11 STLs (e.g stlport). +template inline T *vector_data(std::vector &vector) { + // In some debug environments, operator[] does bounds checking, so &vector[0] + // can't be used. + return vector.empty() ? nullptr : &vector[0]; +} + +template inline const T *vector_data( + const std::vector &vector) { + return vector.empty() ? 
nullptr : &vector[0]; +} + +template +inline void vector_emplace_back(std::vector *vector, V &&data) { + #if defined(FLATBUFFERS_CPP98_STL) + vector->push_back(data); + #else + vector->emplace_back(std::forward(data)); + #endif // defined(FLATBUFFERS_CPP98_STL) +} + +#ifndef FLATBUFFERS_CPP98_STL + #if defined(FLATBUFFERS_TEMPLATES_ALIASES) + template + using numeric_limits = std::numeric_limits; + #else + template class numeric_limits : + public std::numeric_limits {}; + #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) +#else + template class numeric_limits : + public std::numeric_limits { + public: + // Android NDK fix. + static T lowest() { + return std::numeric_limits::min(); + } + }; + + template <> class numeric_limits : + public std::numeric_limits { + public: + static float lowest() { return -FLT_MAX; } + }; + + template <> class numeric_limits : + public std::numeric_limits { + public: + static double lowest() { return -DBL_MAX; } + }; + + template <> class numeric_limits { + public: + static unsigned long long min() { return 0ULL; } + static unsigned long long max() { return ~0ULL; } + static unsigned long long lowest() { + return numeric_limits::min(); + } + }; + + template <> class numeric_limits { + public: + static long long min() { + return static_cast(1ULL << ((sizeof(long long) << 3) - 1)); + } + static long long max() { + return static_cast( + (1ULL << ((sizeof(long long) << 3) - 1)) - 1); + } + static long long lowest() { + return numeric_limits::min(); + } + }; +#endif // FLATBUFFERS_CPP98_STL + +#if defined(FLATBUFFERS_TEMPLATES_ALIASES) + #ifndef FLATBUFFERS_CPP98_STL + template using is_scalar = std::is_scalar; + template using is_same = std::is_same; + template using is_floating_point = std::is_floating_point; + template using is_unsigned = std::is_unsigned; + template using make_unsigned = std::make_unsigned; + #else + // Map C++ TR1 templates defined by stlport. 
+ template using is_scalar = std::tr1::is_scalar; + template using is_same = std::tr1::is_same; + template using is_floating_point = + std::tr1::is_floating_point; + template using is_unsigned = std::tr1::is_unsigned; + // Android NDK doesn't have std::make_unsigned or std::tr1::make_unsigned. + template struct make_unsigned { + static_assert(is_unsigned::value, "Specialization not implemented!"); + using type = T; + }; + template<> struct make_unsigned { using type = unsigned char; }; + template<> struct make_unsigned { using type = unsigned short; }; + template<> struct make_unsigned { using type = unsigned int; }; + template<> struct make_unsigned { using type = unsigned long; }; + template<> + struct make_unsigned { using type = unsigned long long; }; + #endif // !FLATBUFFERS_CPP98_STL +#else + // MSVC 2010 doesn't support C++11 aliases. + template struct is_scalar : public std::is_scalar {}; + template struct is_same : public std::is_same {}; + template struct is_floating_point : + public std::is_floating_point {}; + template struct is_unsigned : public std::is_unsigned {}; + template struct make_unsigned : public std::make_unsigned {}; +#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) + +#ifndef FLATBUFFERS_CPP98_STL + #if defined(FLATBUFFERS_TEMPLATES_ALIASES) + template using unique_ptr = std::unique_ptr; + #else + // MSVC 2010 doesn't support C++11 aliases. + // We're manually "aliasing" the class here as we want to bring unique_ptr + // into the MindSpore.flatbuffers namespace. We have unique_ptr in the MindSpore.flatbuffers + // namespace we have a completely independent implemenation (see below) + // for C++98 STL implementations. 
+ template class unique_ptr : public std::unique_ptr { + public: + unique_ptr() {} + explicit unique_ptr(T* p) : std::unique_ptr(p) {} + unique_ptr(std::unique_ptr&& u) { *this = std::move(u); } + unique_ptr(unique_ptr&& u) { *this = std::move(u); } + unique_ptr& operator=(std::unique_ptr&& u) { + std::unique_ptr::reset(u.release()); + return *this; + } + unique_ptr& operator=(unique_ptr&& u) { + std::unique_ptr::reset(u.release()); + return *this; + } + unique_ptr& operator=(T* p) { + return std::unique_ptr::operator=(p); + } + }; + #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) +#else + // Very limited implementation of unique_ptr. + // This is provided simply to allow the C++ code generated from the default + // settings to function in C++98 environments with no modifications. + template class unique_ptr { + public: + typedef T element_type; + + unique_ptr() : ptr_(nullptr) {} + explicit unique_ptr(T* p) : ptr_(p) {} + unique_ptr(unique_ptr&& u) : ptr_(nullptr) { reset(u.release()); } + unique_ptr(const unique_ptr& u) : ptr_(nullptr) { + reset(const_cast(&u)->release()); + } + ~unique_ptr() { reset(); } + + unique_ptr& operator=(const unique_ptr& u) { + reset(const_cast(&u)->release()); + return *this; + } + + unique_ptr& operator=(unique_ptr&& u) { + reset(u.release()); + return *this; + } + + unique_ptr& operator=(T* p) { + reset(p); + return *this; + } + + const T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + T* get() const noexcept { return ptr_; } + explicit operator bool() const { return ptr_ != nullptr; } + + // modifiers + T* release() { + T* value = ptr_; + ptr_ = nullptr; + return value; + } + + void reset(T* p = nullptr) { + T* value = ptr_; + ptr_ = p; + if (value) delete value; + } + + void swap(unique_ptr& u) { + T* temp_ptr = ptr_; + ptr_ = u.ptr_; + u.ptr_ = temp_ptr; + } + + private: + T* ptr_; + }; + + template bool operator==(const unique_ptr& x, + const unique_ptr& y) { + return x.get() == y.get(); + } + + 
template bool operator==(const unique_ptr& x, + const D* y) { + return static_cast(x.get()) == y; + } + + template bool operator==(const unique_ptr& x, intptr_t y) { + return reinterpret_cast(x.get()) == y; + } +#endif // !FLATBUFFERS_CPP98_STL + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_STL_EMULATION_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/util.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/util.h new file mode 100644 index 0000000000000000000000000000000000000000..ed926b4b59dbd9272d892e1b572a8db7a580cd0f --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/flatbuffers/include/util.h @@ -0,0 +1,654 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_UTIL_H_ +#define FLATBUFFERS_UTIL_H_ + +#include "base.h" + +#include + +#ifndef FLATBUFFERS_PREFER_PRINTF +# include +#else // FLATBUFFERS_PREFER_PRINTF +# include +# include +#endif // FLATBUFFERS_PREFER_PRINTF + +#include +#include + +namespace flatbuffers { + +// @locale-independent functions for ASCII characters set. + +// Fast checking that character lies in closed range: [a <= x <= b] +// using one compare (conditional branch) operator. +inline bool check_ascii_range(char x, char a, char b) { + FLATBUFFERS_ASSERT(a <= b); + // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`. 
+ // The x, a, b will be promoted to int and subtracted without overflow. + return static_cast(x - a) <= static_cast(b - a); +} + +// Case-insensitive isalpha +inline bool is_alpha(char c) { + // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). + return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF); +} + +// Check (case-insensitive) that `c` is equal to alpha. +inline bool is_alpha_char(char c, char alpha) { + FLATBUFFERS_ASSERT(is_alpha(alpha)); + // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). + return ((c & 0xDF) == (alpha & 0xDF)); +} + +// https://en.cppreference.com/w/cpp/string/byte/isxdigit +// isdigit and isxdigit are the only standard narrow character classification +// functions that are not affected by the currently installed C locale. although +// some implementations (e.g. Microsoft in 1252 codepage) may classify +// additional single-byte characters as digits. +inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); } + +inline bool is_xdigit(char c) { + // Replace by look-up table. 
+ return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF); +} + +// Case-insensitive isalnum +inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); } + +// @end-locale-independent functions for ASCII character set + +#ifdef FLATBUFFERS_PREFER_PRINTF +template size_t IntToDigitCount(T t) { + size_t digit_count = 0; + // Count the sign for negative numbers + if (t < 0) digit_count++; + // Count a single 0 left of the dot for fractional numbers + if (-1 < t && t < 1) digit_count++; + // Count digits until fractional part + T eps = std::numeric_limits::epsilon(); + while (t <= (-1 + eps) || (1 - eps) <= t) { + t /= 10; + digit_count++; + } + return digit_count; +} + +template size_t NumToStringWidth(T t, int precision = 0) { + size_t string_width = IntToDigitCount(t); + // Count the dot for floating point numbers + if (precision) string_width += (precision + 1); + return string_width; +} + +template +std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) { + size_t string_width = NumToStringWidth(t, precision); + std::string s(string_width, 0x00); + // Allow snprintf to use std::string trailing null to detect buffer overflow + snprintf(const_cast(s.data()), (s.size() + 1), fmt, precision, t); + return s; +} +#endif // FLATBUFFERS_PREFER_PRINTF + +// Convert an integer or floating point value to a string. +// In contrast to std::stringstream, "char" values are +// converted to a string of digits, and we don't use scientific notation. +template std::string NumToString(T t) { + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + std::stringstream ss; + ss << t; + return ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + auto v = static_cast(t); + return NumToStringImplWrapper(v, "%.*lld"); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on +} +// Avoid char types used as character data. 
+template<> inline std::string NumToString(signed char t) { + return NumToString(static_cast(t)); +} +template<> inline std::string NumToString(unsigned char t) { + return NumToString(static_cast(t)); +} +template<> inline std::string NumToString(char t) { + return NumToString(static_cast(t)); +} +#if defined(FLATBUFFERS_CPP98_STL) +template<> inline std::string NumToString(long long t) { + char buf[21]; // (log((1 << 63) - 1) / log(10)) + 2 + snprintf(buf, sizeof(buf), "%lld", t); + return std::string(buf); +} + +template<> +inline std::string NumToString(unsigned long long t) { + char buf[22]; // (log((1 << 63) - 1) / log(10)) + 1 + snprintf(buf, sizeof(buf), "%llu", t); + return std::string(buf); +} +#endif // defined(FLATBUFFERS_CPP98_STL) + +// Special versions for floats/doubles. +template std::string FloatToString(T t, int precision) { + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + // to_string() prints different numbers of digits for floats depending on + // platform and isn't available on Android, so we use stringstream + std::stringstream ss; + // Use std::fixed to suppress scientific notation. + ss << std::fixed; + // Default precision is 6, we want that to be higher for doubles. + ss << std::setprecision(precision); + ss << t; + auto s = ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + auto v = static_cast(t); + auto s = NumToStringImplWrapper(v, "%0.*f", precision); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on + // Sadly, std::fixed turns "1" into "1.00000", so here we undo that. + auto p = s.find_last_not_of('0'); + if (p != std::string::npos) { + // Strip trailing zeroes. If it is a whole number, keep one zero. + s.resize(p + (s[p] == '.' ? 2 : 1)); + } + return s; +} + +template<> inline std::string NumToString(double t) { + return FloatToString(t, 12); +} +template<> inline std::string NumToString(float t) { + return FloatToString(t, 6); +} + +// Convert an integer value to a hexadecimal string. 
+// The returned string length is always xdigits long, prefixed by 0 digits. +// For example, IntToStringHex(0x23, 8) returns the string "00000023". +inline std::string IntToStringHex(int i, int xdigits) { + FLATBUFFERS_ASSERT(i >= 0); + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + std::stringstream ss; + ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase + << i; + return ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + return NumToStringImplWrapper(i, "%.*X", xdigits); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on +} + +// clang-format off +// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}. +#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0) + class ClassicLocale { + #ifdef _MSC_VER + typedef _locale_t locale_type; + #else + typedef locale_t locale_type; // POSIX.1-2008 locale_t type + #endif + ClassicLocale(); + ~ClassicLocale(); + locale_type locale_; + static ClassicLocale instance_; + public: + static locale_type Get() { return instance_.locale_; } + }; + + #ifdef _MSC_VER + #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get()) + #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get()) + #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get()) + #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get()) + #else + #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get()) + #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get()) + #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get()) + #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get()) + #endif +#else + #define __strtod_impl(s, pe) strtod(s, pe) + #define __strtof_impl(s, pe) static_cast(strtod(s, pe)) + #ifdef _MSC_VER + #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b) + #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b) + #else + #define __strtoull_impl(s, 
pe, b) strtoull(s, pe, b) + #define __strtoll_impl(s, pe, b) strtoll(s, pe, b) + #endif +#endif + +inline void strtoval_impl(int64_t *val, const char *str, char **endptr, + int base) { + *val = __strtoll_impl(str, endptr, base); +} + +inline void strtoval_impl(uint64_t *val, const char *str, char **endptr, + int base) { + *val = __strtoull_impl(str, endptr, base); +} + +inline void strtoval_impl(double *val, const char *str, char **endptr) { + *val = __strtod_impl(str, endptr); +} + +// UBSAN: double to float is safe if numeric_limits::is_iec559 is true. +__supress_ubsan__("float-cast-overflow") +inline void strtoval_impl(float *val, const char *str, char **endptr) { + *val = __strtof_impl(str, endptr); +} +#undef __strtoull_impl +#undef __strtoll_impl +#undef __strtod_impl +#undef __strtof_impl +// clang-format on + +// Adaptor for strtoull()/strtoll(). +// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9), +// while strtoll with base=0 interprets first leading zero as octal prefix. +// In future, it is possible to add prefixed 0b0101. +// 1) Checks errno code for overflow condition (out of range). +// 2) If base <= 0, function try to detect base of number by prefix. +// +// Return value (like strtoull and strtoll, but reject partial result): +// - If successful, an integer value corresponding to the str is returned. +// - If full string conversion can't be performed, 0 is returned. +// - If the converted value falls out of range of corresponding return type, a +// range error occurs. In this case value MAX(T)/MIN(T) is returned. 
+template +inline bool StringToIntegerImpl(T *val, const char *const str, + const int base = 0, + const bool check_errno = true) { + // T is int64_t or uint64_T + FLATBUFFERS_ASSERT(str); + if (base <= 0) { + auto s = str; + while (*s && !is_digit(*s)) s++; + if (s[0] == '0' && is_alpha_char(s[1], 'X')) + return StringToIntegerImpl(val, str, 16, check_errno); + // if a prefix not match, try base=10 + return StringToIntegerImpl(val, str, 10, check_errno); + } else { + if (check_errno) errno = 0; // clear thread-local errno + auto endptr = str; + strtoval_impl(val, str, const_cast(&endptr), base); + if ((*endptr != '\0') || (endptr == str)) { + *val = 0; // erase partial result + return false; // invalid string + } + // errno is out-of-range, return MAX/MIN + if (check_errno && errno) return false; + return true; + } +} + +template +inline bool StringToFloatImpl(T *val, const char *const str) { + // Type T must be either float or double. + FLATBUFFERS_ASSERT(str && val); + auto end = str; + strtoval_impl(val, str, const_cast(&end)); + auto done = (end != str) && (*end == '\0'); + if (!done) *val = 0; // erase partial result + return done; +} + +// Convert a string to an instance of T. +// Return value (matched with StringToInteger64Impl and strtod): +// - If successful, a numeric value corresponding to the str is returned. +// - If full string conversion can't be performed, 0 is returned. +// - If the converted value falls out of range of corresponding return type, a +// range error occurs. In this case value MAX(T)/MIN(T) is returned. +template inline bool StringToNumber(const char *s, T *val) { + FLATBUFFERS_ASSERT(s && val); + int64_t i64; + // The errno check isn't needed, will return MAX/MIN on overflow. 
+ if (StringToIntegerImpl(&i64, s, 0, false)) { + const int64_t max = flatbuffers::numeric_limits::max(); + const int64_t min = flatbuffers::numeric_limits::lowest(); + if (i64 > max) { + *val = static_cast(max); + return false; + } + if (i64 < min) { + // For unsigned types return max to distinguish from + // "no conversion can be performed" when 0 is returned. + *val = static_cast(flatbuffers::is_unsigned::value ? max : min); + return false; + } + *val = static_cast(i64); + return true; + } + *val = 0; + return false; +} + +template<> inline bool StringToNumber(const char *str, int64_t *val) { + return StringToIntegerImpl(val, str); +} + +template<> +inline bool StringToNumber(const char *str, uint64_t *val) { + if (!StringToIntegerImpl(val, str)) return false; + // The strtoull accepts negative numbers: + // If the minus sign was part of the input sequence, the numeric value + // calculated from the sequence of digits is negated as if by unary minus + // in the result type, which applies unsigned integer wraparound rules. + // Fix this behaviour (except -0). + if (*val) { + auto s = str; + while (*s && !is_digit(*s)) s++; + s = (s > str) ? (s - 1) : s; // step back to one symbol + if (*s == '-') { + // For unsigned types return the max to distinguish from + // "no conversion can be performed". + *val = flatbuffers::numeric_limits::max(); + return false; + } + } + return true; +} + +template<> inline bool StringToNumber(const char *s, float *val) { + return StringToFloatImpl(val, s); +} + +template<> inline bool StringToNumber(const char *s, double *val) { + return StringToFloatImpl(val, s); +} + +inline int64_t StringToInt(const char *s, int base = 10) { + int64_t val; + return StringToIntegerImpl(&val, s, base) ? val : 0; +} + +inline uint64_t StringToUInt(const char *s, int base = 10) { + uint64_t val; + return StringToIntegerImpl(&val, s, base) ? 
val : 0; +} + +typedef bool (*LoadFileFunction)(const char *filename, bool binary, + std::string *dest); +typedef bool (*FileExistsFunction)(const char *filename); + +LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function); + +FileExistsFunction SetFileExistsFunction( + FileExistsFunction file_exists_function); + +// Check if file "name" exists. +bool FileExists(const char *name); + +// Check if "name" exists and it is also a directory. +bool DirExists(const char *name); + +// Load file "name" into "buf" returning true if successful +// false otherwise. If "binary" is false data is read +// using ifstream's text mode, otherwise data is read with +// no transcoding. +bool LoadFile(const char *name, bool binary, std::string *buf); + +// Save data "buf" of length "len" bytes into a file +// "name" returning true if successful, false otherwise. +// If "binary" is false data is written using ifstream's +// text mode, otherwise data is written with no +// transcoding. +bool SaveFile(const char *name, const char *buf, size_t len, bool binary); + +// Save data "buf" into file "name" returning true if +// successful, false otherwise. If "binary" is false +// data is written using ifstream's text mode, otherwise +// data is written with no transcoding. +inline bool SaveFile(const char *name, const std::string &buf, bool binary) { + return SaveFile(name, buf.c_str(), buf.size(), binary); +} + +// Functionality for minimalistic portable path handling. + +// The functions below behave correctly regardless of whether posix ('/') or +// Windows ('/' or '\\') separators are used. + +// Any new separators inserted are always posix. +FLATBUFFERS_CONSTEXPR char kPathSeparator = '/'; + +// Returns the path with the extension, if any, removed. +std::string StripExtension(const std::string &filepath); + +// Returns the extension, if any. +std::string GetExtension(const std::string &filepath); + +// Return the last component of the path, after the last separator. 
+std::string StripPath(const std::string &filepath); + +// Strip the last component of the path + separator. +std::string StripFileName(const std::string &filepath); + +// Concatenates a path with a filename, regardless of wether the path +// ends in a separator or not. +std::string ConCatPathFileName(const std::string &path, + const std::string &filename); + +// Replaces any '\\' separators with '/' +std::string PosixPath(const char *path); + +// This function ensure a directory exists, by recursively +// creating dirs for any parts of the path that don't exist yet. +void EnsureDirExists(const std::string &filepath); + +// Obtains the absolute path from any other path. +// Returns the input path if the absolute path couldn't be resolved. +std::string AbsolutePath(const std::string &filepath); + +// To and from UTF-8 unicode conversion functions + +// Convert a unicode code point into a UTF-8 representation by appending it +// to a string. Returns the number of bytes generated. +inline int ToUTF8(uint32_t ucc, std::string *out) { + FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set. + // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8 + for (int i = 0; i < 6; i++) { + // Max bits this encoding can represent. + uint32_t max_bits = 6 + i * 5 + static_cast(!i); + if (ucc < (1u << max_bits)) { // does it fit? + // Remaining bits not encoded in the first byte, store 6 bits each + uint32_t remain_bits = i * 6; + // Store first byte: + (*out) += static_cast((0xFE << (max_bits - remain_bits)) | + (ucc >> remain_bits)); + // Store remaining bytes: + for (int j = i - 1; j >= 0; j--) { + (*out) += static_cast(((ucc >> (j * 6)) & 0x3F) | 0x80); + } + return i + 1; // Return the number of bytes added. + } + } + FLATBUFFERS_ASSERT(0); // Impossible to arrive here. + return -1; +} + +// Converts whatever prefix of the incoming string corresponds to a valid +// UTF-8 sequence into a unicode code. 
The incoming pointer will have been +// advanced past all bytes parsed. +// returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in +// this case). +inline int FromUTF8(const char **in) { + int len = 0; + // Count leading 1 bits. + for (int mask = 0x80; mask >= 0x04; mask >>= 1) { + if (**in & mask) { + len++; + } else { + break; + } + } + if ((static_cast(**in) << len) & 0x80) + return -1; // Bit after leading 1's must be 0. + if (!len) return *(*in)++; + // UTF-8 encoded values with a length are between 2 and 4 bytes. + if (len < 2 || len > 4) { return -1; } + // Grab initial bits of the code. + int ucc = *(*in)++ & ((1 << (7 - len)) - 1); + for (int i = 0; i < len - 1; i++) { + if ((**in & 0xC0) != 0x80) return -1; // Upper bits must 1 0. + ucc <<= 6; + ucc |= *(*in)++ & 0x3F; // Grab 6 more bits of the code. + } + // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for + // UTF-16 surrogate pairs). + if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; } + // UTF-8 must represent code points in their shortest possible encoding. + switch (len) { + case 2: + // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF. + if (ucc < 0x0080 || ucc > 0x07FF) { return -1; } + break; + case 3: + // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF. + if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; } + break; + case 4: + // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF. + if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; } + break; + } + return ucc; +} + +#ifndef FLATBUFFERS_PREFER_PRINTF +// Wraps a string to a maximum length, inserting new lines where necessary. Any +// existing whitespace will be collapsed down to a single space. A prefix or +// suffix can be provided, which will be inserted before or after a wrapped +// line, respectively. 
+inline std::string WordWrap(const std::string in, size_t max_length, + const std::string wrapped_line_prefix, + const std::string wrapped_line_suffix) { + std::istringstream in_stream(in); + std::string wrapped, line, word; + + in_stream >> word; + line = word; + + while (in_stream >> word) { + if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) < + max_length) { + line += " " + word; + } else { + wrapped += line + wrapped_line_suffix + "\n"; + line = wrapped_line_prefix + word; + } + } + wrapped += line; + + return wrapped; +} +#endif // !FLATBUFFERS_PREFER_PRINTF + +inline bool EscapeString(const char *s, size_t length, std::string *_text, + bool allow_non_utf8, bool natural_utf8) { + std::string &text = *_text; + text += "\""; + for (uoffset_t i = 0; i < length; i++) { + char c = s[i]; + switch (c) { + case '\n': text += "\\n"; break; + case '\t': text += "\\t"; break; + case '\r': text += "\\r"; break; + case '\b': text += "\\b"; break; + case '\f': text += "\\f"; break; + case '\"': text += "\\\""; break; + case '\\': text += "\\\\"; break; + default: + if (c >= ' ' && c <= '~') { + text += c; + } else { + // Not printable ASCII data. Let's see if it's valid UTF-8 first: + const char *utf8 = s + i; + int ucc = FromUTF8(&utf8); + if (ucc < 0) { + if (allow_non_utf8) { + text += "\\x"; + text += IntToStringHex(static_cast(c), 2); + } else { + // There are two cases here: + // + // 1) We reached here by parsing an IDL file. In that case, + // we previously checked for non-UTF-8, so we shouldn't reach + // here. + // + // 2) We reached here by someone calling GenerateText() + // on a previously-serialized flatbuffer. The data might have + // non-UTF-8 Strings, or might be corrupt. + // + // In both cases, we have to give up and inform the caller + // they have no JSON. 
+ return false; + } + } else { + if (natural_utf8) { + // utf8 points to past all utf-8 bytes parsed + text.append(s + i, static_cast(utf8 - s - i)); + } else if (ucc <= 0xFFFF) { + // Parses as Unicode within JSON's \uXXXX range, so use that. + text += "\\u"; + text += IntToStringHex(ucc, 4); + } else if (ucc <= 0x10FFFF) { + // Encode Unicode SMP values to a surrogate pair using two \u + // escapes. + uint32_t base = ucc - 0x10000; + auto high_surrogate = (base >> 10) + 0xD800; + auto low_surrogate = (base & 0x03FF) + 0xDC00; + text += "\\u"; + text += IntToStringHex(high_surrogate, 4); + text += "\\u"; + text += IntToStringHex(low_surrogate, 4); + } + // Skip past characters recognized. + i = static_cast(utf8 - s - 1); + } + } + break; + } + } + text += "\""; + return true; +} + +// Remove paired quotes in a string: "text"|'text' -> text. +std::string RemoveStringQuotes(const std::string &s); + +// Change th global C-locale to locale with name . +// Returns an actual locale name in <_value>, useful if locale_name is "" or +// null. +bool SetGlobalTestLocale(const char *locale_name, + std::string *_value = nullptr); + +// Read (or test) a value of environment variable. +bool ReadEnvironmentVariable(const char *var_name, + std::string *_value = nullptr); + +} // namespace MindSpore.flatbuffers + +#endif // FLATBUFFERS_UTIL_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ir/dtype/type_id.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ir/dtype/type_id.h new file mode 100644 index 0000000000000000000000000000000000000000..254c3b092915b2ff0af12f1fe06dee69ac84c5f2 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ir/dtype/type_id.h @@ -0,0 +1,87 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). 
+ * + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ +#define MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ + +#include +#include + +namespace mindspore { +// +// Supported meta type +// +enum TypeId : int { + kTypeUnknown = 0, + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeIndexedSlicesType, + kObjectTypeSparseTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeEnd +}; +} // namespace 
mindspore +#endif // MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/lite_session.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/lite_session.h new file mode 100644 index 0000000000000000000000000000000000000000..f3ac3ea288bc498fbd049ac6f8a10e91b967d97b --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/lite_session.h @@ -0,0 +1,127 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H +#define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H + +#include +#include +#include +#include +#include "ms_tensor.h" +#include "model.h" +#include "context.h" + +namespace mindspore { +namespace session { +/// \brief CallBackParam defined input arguments for callBack function. +struct CallBackParam { + std::string name_callback_param; /**< node name argument */ + std::string type_callback_param; /**< node type argument */ +}; + +/// \brief KernelCallBack defined the function pointer for callBack. +using KernelCallBack = std::function inputs, + std::vector outputs, const CallBackParam &opInfo)>; + +/// \brief LiteSession defined session in MindSpore Lite for compiling Model and forwarding model. +class MS_API LiteSession { + public: + /// \brief Static method to create a LiteSession pointer. + /// + /// \param[in] context Define the context of session to be created. 
+ /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(lite::Context *context); + + /// \brief Destructor of MindSpore Lite LiteSession. + virtual ~LiteSession() = default; + + /// \brief Attempt to bind or unbind threads in the thread pool to or from the specified cpu core. + /// + /// \param[in] if_bind Define whether to bind or unbind threads. + virtual void BindThread(bool if_bind) = 0; + + /// \brief Compile MindSpore Lite model. + /// + /// \note CompileGraph should be called before RunGraph. + /// + /// \param[in] model Define the model to be compiled. + /// + /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h. + virtual int CompileGraph(lite::Model *model) = 0; + + /// \brief Get input MindSpore Lite MSTensors of model. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual std::vector GetInputs() const = 0; + + /// \brief Get input MindSpore Lite MSTensors of model by node name. + /// + /// \param[in] node_name Define node name. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual std::vector GetInputsByName(const std::string &node_name) const = 0; + + /// \brief Run session with callback. + /// + /// \param[in] before Define a call_back_function to be called before running each node. + /// \param[in] after Define a call_back_function called after running each node. + /// + /// \note RunGraph should be called after CompileGraph. + /// + /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. + virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; + + /// \brief Get output MindSpore Lite MSTensors of model mapped by node name. + /// + /// \return The map of output node name and MindSpore Lite MSTensor. + virtual std::unordered_map> GetOutputMapByNode() const = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by node name. 
+ /// + /// \param[in] node_name Define node name. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual std::vector GetOutputsByNodeName(const std::string &node_name) const = 0; + + /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name. + /// + /// \return The map of output tensor name and MindSpore Lite MSTensor. + virtual std::unordered_map GetOutputMapByTensor() const = 0; + + /// \brief Get name of output tensors of model compiled by this session. + /// + /// \return The vector of string as output tensor names in order. + virtual std::vector GetOutputTensorNames() const = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by tensor name. + /// + /// \param[in] tensor_name Define tensor name. + /// + /// \return Pointer of MindSpore Lite MSTensor. + virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const = 0; + + /// \brief Resize inputs shape. + /// + /// \param[in] inputs Define the new inputs shape. + /// + /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. + virtual int Resize(const std::vector &inputs) = 0; +}; +} // namespace session +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_LITE_SESSION_H diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/model.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/model.h new file mode 100644 index 0000000000000000000000000000000000000000..2a880ca0ae78a75bf1848185ede0d19c54ee33a0 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/model.h @@ -0,0 +1,110 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H +#define MINDSPORE_LITE_INCLUDE_MODEL_H + +#include +#include +#include +#include "schema/model_generated.h" + +namespace mindspore { +#define MS_API __attribute__((visibility("default"))) + +namespace lite { +/// \brief ModelImpl defined the implement class of Model in MindSpore Lite. +/// +/// \note List public class and interface for reference. +class ModelImpl; + +/// \brief Primitive defined as prototype of operator. +/// +/// \note List public class and interface for reference. +class PrimitiveC; + +/// \brief Model defined model in MindSpore Lite for managing graph. +class MS_API Model { + public: + /// \brief Static method to create a Model pointer. + /// + /// \param[in] model_buf Define the buffer read from a model file. + /// \param[in] size Define bytes number of model buffer. + /// + /// \return Pointer of MindSpore Lite Model. + static Model *Import(const char *model_buf, size_t size); + + /// \brief Constructor of MindSpore Lite Model using default value for parameters. + /// + /// \return Instance of MindSpore Lite Model. + Model() = default; + + /// \brief Destructor of MindSpore Lite Model. + virtual ~Model(); + + /// \brief Get MindSpore Lite Primitive by name. + /// + /// \param[in] name Define name of primitive to be returned. + /// + /// \return the pointer of MindSpore Lite Primitive. + PrimitiveC *GetOp(const std::string &name) const; + + /// \brief Get graph defined in flatbuffers. + /// + /// \return the pointer of graph defined in flatbuffers. 
+ const schema::MetaGraph *GetMetaGraph() const; + + /// \brief Free MetaGraph in MindSpore Lite Model. + void FreeMetaGraph(); + + protected: + ModelImpl *model_impl_ = nullptr; +}; + +/// \brief ModelBuilder defined by MindSpore Lite. +class MS_API ModelBuilder { + public: + /// \brief OutEdge defined by MindSpore Lite. + struct OutEdge { + std::string nodeId; /**< ID of a node linked by this edge */ + size_t outEdgeIndex; /**< Index of this edge */ + }; + + /// \brief Constructor of MindSpore Lite Model using default value for parameters. + /// + /// \return Instance of MindSpore Lite ModelBuilder. + ModelBuilder() = default; + + /// \brief Destructor of MindSpore Lite ModelBuilder. + virtual ~ModelBuilder() = default; + + /// \brief Add primitive into model builder for model building. + /// + /// \param[in] op Define the primitive to be added. + /// \param[in] inputs Define input edge of primitive to be added. + /// + /// \return ID of the added primitive. + virtual std::string AddOp(const PrimitiveC &op, const std::vector &inputs) = 0; + + /// \brief Finish constructing the model. + /// + /// \return the pointer of MindSpore Lite Model. + virtual Model *Construct(); +}; +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_MODEL_H diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ms_tensor.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ms_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..41b8131b9f80278fad558e4c0ed8b4839d20d158 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/ms_tensor.h @@ -0,0 +1,108 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ +#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ + +#include +#include +#include +#include "ir/dtype/type_id.h" + +namespace mindspore { +#define MS_API __attribute__((visibility("default"))) +namespace tensor { +/// \brief MSTensor defined tensor in MindSpore Lite. +class MS_API MSTensor { + public: + /// \brief Constructor of MindSpore Lite MSTensor. + /// + /// \return Instance of MindSpore Lite MSTensor. + MSTensor() = default; + + /// \brief Static method to create a MSTensor pointer. + /// + /// \param[in] data_type Define data type of tensor to be created. + /// \param[in] shape Define Shape of tensor to be created. + /// + /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in TypeId enum are + /// suitable for MSTensor. + /// + /// \return the pointer of MSTensor. + static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); + + /// \brief Destructor of MindSpore Lite Model. + virtual ~MSTensor() = default; + + /// \brief Get data type of the MindSpore Lite MSTensor. + /// + /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in TypeId enum are + /// suitable for MSTensor. + /// + /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. + virtual TypeId data_type() const = 0; + + /// \brief Set data type for the MindSpore Lite MSTensor. + /// + /// \param[in] data_type Define MindSpore Lite TypeId to be set in the MindSpore Lite MSTensor. 
+ /// + /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor after set. + virtual TypeId set_data_type(TypeId data_type) = 0; + + /// \brief Get shape of the MindSpore Lite MSTensor. + /// + /// \return A vector of int as the shape of the MindSpore Lite MSTensor. + virtual std::vector shape() const = 0; + + /// \brief Set shape for the MindSpore Lite MSTensor. + /// + /// \param[in] shape Define a vector of int as shape to be set into the MindSpore Lite MSTensor. + /// + /// \return size of shape of the MindSpore Lite MSTensor after set. + virtual size_t set_shape(const std::vector &shape) = 0; + + /// \brief Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index. + /// + /// \param[in] index Define index of dimension returned. + /// + /// \return Size of dimension of the MindSpore Lite MSTensor. + virtual int DimensionSize(size_t index) const = 0; + + /// \brief Get number of element in MSTensor. + /// + /// \return Number of element in MSTensor. + virtual int ElementsNum() const = 0; + + /// \brief Get hash of the MindSpore Lite MSTensor. + /// + /// \return Hash of the MindSpore Lite MSTensor. + virtual std::size_t hash() const = 0; + + /// \brief Get byte size of data in MSTensor. + /// + /// \return Byte size of data in MSTensor. + virtual size_t Size() const = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. + /// + /// \return the pointer points to data in MSTensor. 
+ virtual void *MutableData() const = 0; +}; +} // namespace tensor +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/model_generated.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/model_generated.h new file mode 100644 index 0000000000000000000000000000000000000000..ec16e43e445c34e2877180ff38a9eda72d08cae7 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/model_generated.h @@ -0,0 +1,3275 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ +#define FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "ops_generated.h" + +namespace mindspore { +namespace schema { + +struct QuantParam; + +struct Tensor; + +struct Primitive; + +struct CNode; + +struct MetaGraph; + +enum NodeType { + NodeType_ValueNode = 0, + NodeType_Parameter = 1, + NodeType_CNode = 2, + NodeType_MIN = NodeType_ValueNode, + NodeType_MAX = NodeType_CNode +}; + +inline const NodeType (&EnumValuesNodeType())[3] { + static const NodeType values[] = { + NodeType_ValueNode, + NodeType_Parameter, + NodeType_CNode + }; + return values; +} + +inline const char * const *EnumNamesNodeType() { + static const char * const names[] = { + "ValueNode", + "Parameter", + "CNode", + nullptr + }; + return names; +} + +inline const char *EnumNameNodeType(NodeType e) { + if (e < NodeType_ValueNode || e > NodeType_CNode) return ""; + const size_t index = static_cast(e); + return EnumNamesNodeType()[index]; +} + +enum PrimitiveType { + PrimitiveType_NONE = 0, + PrimitiveType_Concat = 1, + PrimitiveType_SoftMax = 2, + PrimitiveType_Activation = 3, + PrimitiveType_Conv2D = 4, + PrimitiveType_FusedBatchNorm = 5, + PrimitiveType_CaffeBatchNorm = 6, + PrimitiveType_BiasAdd = 7, + PrimitiveType_Pooling = 8, + PrimitiveType_DepthwiseConv2D = 9, 
+ PrimitiveType_DeDepthwiseConv2D = 10, + PrimitiveType_Resize = 11, + PrimitiveType_DetectionPostProcess = 12, + PrimitiveType_FullConnection = 13, + PrimitiveType_Mean = 14, + PrimitiveType_DeConv2D = 15, + PrimitiveType_Scale = 16, + PrimitiveType_Reshape = 17, + PrimitiveType_Eltwise = 18, + PrimitiveType_NetOutput = 19, + PrimitiveType_Add = 20, + PrimitiveType_Sub = 21, + PrimitiveType_MatMul = 22, + PrimitiveType_StridedSlice = 23, + PrimitiveType_Power = 24, + PrimitiveType_Slice = 25, + PrimitiveType_Stack = 26, + PrimitiveType_Mul = 27, + PrimitiveType_RealDiv = 28, + PrimitiveType_Pad = 29, + PrimitiveType_Maximum = 30, + PrimitiveType_Minimum = 31, + PrimitiveType_CaffePReLU = 32, + PrimitiveType_LeakyReLU = 33, + PrimitiveType_ArgMax = 34, + PrimitiveType_ArgMin = 35, + PrimitiveType_Exp = 36, + PrimitiveType_Crop = 37, + PrimitiveType_Range = 38, + PrimitiveType_Rsqrt = 39, + PrimitiveType_ExpandDims = 40, + PrimitiveType_Tile = 41, + PrimitiveType_Cast = 42, + PrimitiveType_Shape = 43, + PrimitiveType_Nchw2Nhwc = 44, + PrimitiveType_Nhwc2Nchw = 45, + PrimitiveType_QuantDTypeCast = 46, + PrimitiveType_Split = 47, + PrimitiveType_Permute = 48, + PrimitiveType_FakeQuantWithMinMaxVars = 49, + PrimitiveType_Equal = 50, + PrimitiveType_Less = 51, + PrimitiveType_Greater = 52, + PrimitiveType_NotEqual = 53, + PrimitiveType_LessEqual = 54, + PrimitiveType_GreaterEqual = 55, + PrimitiveType_Min = 56, + PrimitiveType_Floor = 57, + PrimitiveType_Abs = 58, + PrimitiveType_Neg = 59, + PrimitiveType_Cos = 60, + PrimitiveType_Sin = 61, + PrimitiveType_Sqrt = 62, + PrimitiveType_Square = 63, + PrimitiveType_Constant = 64, + PrimitiveType_Log = 65, + PrimitiveType_Tan = 66, + PrimitiveType_Atan = 67, + PrimitiveType_Asin = 68, + PrimitiveType_Clip = 69, + PrimitiveType_Transpose = 70, + PrimitiveType_Squeeze = 71, + PrimitiveType_Unsqueeze = 72, + PrimitiveType_Upsample = 73, + PrimitiveType_Dropout = 74, + PrimitiveType_Broadcast = 75, + PrimitiveType_BroadcastTo = 
76, + PrimitiveType_Lrn = 77, + PrimitiveType_Prelu = 78, + PrimitiveType_ZerosLike = 79, + PrimitiveType_TopK = 80, + PrimitiveType_SpaceToDepth = 81, + PrimitiveType_SpaceToBatch = 82, + PrimitiveType_SparseToDense = 83, + PrimitiveType_ReverseSequence = 84, + PrimitiveType_Rank = 85, + PrimitiveType_Gather = 86, + PrimitiveType_GatherNd = 87, + PrimitiveType_Fill = 88, + PrimitiveType_Elu = 89, + PrimitiveType_DepthToSpace = 90, + PrimitiveType_BatchToSpace = 91, + PrimitiveType_AddN = 92, + PrimitiveType_Ceil = 93, + PrimitiveType_EmbeddingLookup = 94, + PrimitiveType_EmbeddingLookupSparse = 95, + PrimitiveType_FloorDiv = 96, + PrimitiveType_FloorMod = 97, + PrimitiveType_L2Norm = 98, + PrimitiveType_LocalResponseNormalization = 99, + PrimitiveType_MatrixDiag = 100, + PrimitiveType_Reduce = 101, + PrimitiveType_Reverse = 102, + PrimitiveType_Round = 103, + PrimitiveType_Select = 104, + PrimitiveType_Scatter = 105, + PrimitiveType_ScatterND = 106, + PrimitiveType_Unique = 107, + PrimitiveType_Unstack = 108, + PrimitiveType_LogicalAnd = 109, + PrimitiveType_LogicalOr = 110, + PrimitiveType_LogicalXor = 111, + PrimitiveType_LogicalNot = 112, + PrimitiveType_OnnxInt8Quantize = 113, + PrimitiveType_OnnxInt8Dequantize = 114, + PrimitiveType_FakeQuantWithMinMax = 115, + PrimitiveType_FakeQuantWithMinMaxPerChannel = 116, + PrimitiveType_BatchNormFold = 117, + PrimitiveType_MulFold = 118, + PrimitiveType_AddFold = 119, + PrimitiveType_SquaredDifference = 120, + PrimitiveType_Flatten = 121, + PrimitiveType_TupleGetItem = 122, + PrimitiveType_Div = 123, + PrimitiveType_Where = 124, + PrimitiveType_OneHot = 125, + PrimitiveType_Lstm = 126, + PrimitiveType_Conv2DGradFilter = 127, + PrimitiveType_Conv2DGradInput = 128, + PrimitiveType_PoolingGrad = 129, + PrimitiveType_BNGradInput = 130, + PrimitiveType_OptMomentum = 131, + PrimitiveType_BiasGrad = 132, + PrimitiveType_SoftmaxCrossEntropy = 133, + PrimitiveType_AddGrad = 134, + PrimitiveType_SubGrad = 135, + 
PrimitiveType_MulGrad = 136, + PrimitiveType_DivGrad = 137, + PrimitiveType_PowerGrad = 138, + PrimitiveType_ActivationGrad = 139, + PrimitiveType_PriorBox = 140, + PrimitiveType_SpaceToBatchND = 141, + PrimitiveType_TopKV2 = 142, + PrimitiveType_MIN = PrimitiveType_NONE, + PrimitiveType_MAX = PrimitiveType_TopKV2 +}; + +inline const PrimitiveType (&EnumValuesPrimitiveType())[143] { + static const PrimitiveType values[] = { + PrimitiveType_NONE, + PrimitiveType_Concat, + PrimitiveType_SoftMax, + PrimitiveType_Activation, + PrimitiveType_Conv2D, + PrimitiveType_FusedBatchNorm, + PrimitiveType_CaffeBatchNorm, + PrimitiveType_BiasAdd, + PrimitiveType_Pooling, + PrimitiveType_DepthwiseConv2D, + PrimitiveType_DeDepthwiseConv2D, + PrimitiveType_Resize, + PrimitiveType_DetectionPostProcess, + PrimitiveType_FullConnection, + PrimitiveType_Mean, + PrimitiveType_DeConv2D, + PrimitiveType_Scale, + PrimitiveType_Reshape, + PrimitiveType_Eltwise, + PrimitiveType_NetOutput, + PrimitiveType_Add, + PrimitiveType_Sub, + PrimitiveType_MatMul, + PrimitiveType_StridedSlice, + PrimitiveType_Power, + PrimitiveType_Slice, + PrimitiveType_Stack, + PrimitiveType_Mul, + PrimitiveType_RealDiv, + PrimitiveType_Pad, + PrimitiveType_Maximum, + PrimitiveType_Minimum, + PrimitiveType_CaffePReLU, + PrimitiveType_LeakyReLU, + PrimitiveType_ArgMax, + PrimitiveType_ArgMin, + PrimitiveType_Exp, + PrimitiveType_Crop, + PrimitiveType_Range, + PrimitiveType_Rsqrt, + PrimitiveType_ExpandDims, + PrimitiveType_Tile, + PrimitiveType_Cast, + PrimitiveType_Shape, + PrimitiveType_Nchw2Nhwc, + PrimitiveType_Nhwc2Nchw, + PrimitiveType_QuantDTypeCast, + PrimitiveType_Split, + PrimitiveType_Permute, + PrimitiveType_FakeQuantWithMinMaxVars, + PrimitiveType_Equal, + PrimitiveType_Less, + PrimitiveType_Greater, + PrimitiveType_NotEqual, + PrimitiveType_LessEqual, + PrimitiveType_GreaterEqual, + PrimitiveType_Min, + PrimitiveType_Floor, + PrimitiveType_Abs, + PrimitiveType_Neg, + PrimitiveType_Cos, + PrimitiveType_Sin, 
+ PrimitiveType_Sqrt, + PrimitiveType_Square, + PrimitiveType_Constant, + PrimitiveType_Log, + PrimitiveType_Tan, + PrimitiveType_Atan, + PrimitiveType_Asin, + PrimitiveType_Clip, + PrimitiveType_Transpose, + PrimitiveType_Squeeze, + PrimitiveType_Unsqueeze, + PrimitiveType_Upsample, + PrimitiveType_Dropout, + PrimitiveType_Broadcast, + PrimitiveType_BroadcastTo, + PrimitiveType_Lrn, + PrimitiveType_Prelu, + PrimitiveType_ZerosLike, + PrimitiveType_TopK, + PrimitiveType_SpaceToDepth, + PrimitiveType_SpaceToBatch, + PrimitiveType_SparseToDense, + PrimitiveType_ReverseSequence, + PrimitiveType_Rank, + PrimitiveType_Gather, + PrimitiveType_GatherNd, + PrimitiveType_Fill, + PrimitiveType_Elu, + PrimitiveType_DepthToSpace, + PrimitiveType_BatchToSpace, + PrimitiveType_AddN, + PrimitiveType_Ceil, + PrimitiveType_EmbeddingLookup, + PrimitiveType_EmbeddingLookupSparse, + PrimitiveType_FloorDiv, + PrimitiveType_FloorMod, + PrimitiveType_L2Norm, + PrimitiveType_LocalResponseNormalization, + PrimitiveType_MatrixDiag, + PrimitiveType_Reduce, + PrimitiveType_Reverse, + PrimitiveType_Round, + PrimitiveType_Select, + PrimitiveType_Scatter, + PrimitiveType_ScatterND, + PrimitiveType_Unique, + PrimitiveType_Unstack, + PrimitiveType_LogicalAnd, + PrimitiveType_LogicalOr, + PrimitiveType_LogicalXor, + PrimitiveType_LogicalNot, + PrimitiveType_OnnxInt8Quantize, + PrimitiveType_OnnxInt8Dequantize, + PrimitiveType_FakeQuantWithMinMax, + PrimitiveType_FakeQuantWithMinMaxPerChannel, + PrimitiveType_BatchNormFold, + PrimitiveType_MulFold, + PrimitiveType_AddFold, + PrimitiveType_SquaredDifference, + PrimitiveType_Flatten, + PrimitiveType_TupleGetItem, + PrimitiveType_Div, + PrimitiveType_Where, + PrimitiveType_OneHot, + PrimitiveType_Lstm, + PrimitiveType_Conv2DGradFilter, + PrimitiveType_Conv2DGradInput, + PrimitiveType_PoolingGrad, + PrimitiveType_BNGradInput, + PrimitiveType_OptMomentum, + PrimitiveType_BiasGrad, + PrimitiveType_SoftmaxCrossEntropy, + PrimitiveType_AddGrad, + 
PrimitiveType_SubGrad, + PrimitiveType_MulGrad, + PrimitiveType_DivGrad, + PrimitiveType_PowerGrad, + PrimitiveType_ActivationGrad, + PrimitiveType_PriorBox, + PrimitiveType_SpaceToBatchND, + PrimitiveType_TopKV2 + }; + return values; +} + +inline const char * const *EnumNamesPrimitiveType() { + static const char * const names[] = { + "NONE", + "Concat", + "SoftMax", + "Activation", + "Conv2D", + "FusedBatchNorm", + "CaffeBatchNorm", + "BiasAdd", + "Pooling", + "DepthwiseConv2D", + "DeDepthwiseConv2D", + "Resize", + "DetectionPostProcess", + "FullConnection", + "Mean", + "DeConv2D", + "Scale", + "Reshape", + "Eltwise", + "NetOutput", + "Add", + "Sub", + "MatMul", + "StridedSlice", + "Power", + "Slice", + "Stack", + "Mul", + "RealDiv", + "Pad", + "Maximum", + "Minimum", + "CaffePReLU", + "LeakyReLU", + "ArgMax", + "ArgMin", + "Exp", + "Crop", + "Range", + "Rsqrt", + "ExpandDims", + "Tile", + "Cast", + "Shape", + "Nchw2Nhwc", + "Nhwc2Nchw", + "QuantDTypeCast", + "Split", + "Permute", + "FakeQuantWithMinMaxVars", + "Equal", + "Less", + "Greater", + "NotEqual", + "LessEqual", + "GreaterEqual", + "Min", + "Floor", + "Abs", + "Neg", + "Cos", + "Sin", + "Sqrt", + "Square", + "Constant", + "Log", + "Tan", + "Atan", + "Asin", + "Clip", + "Transpose", + "Squeeze", + "Unsqueeze", + "Upsample", + "Dropout", + "Broadcast", + "BroadcastTo", + "Lrn", + "Prelu", + "ZerosLike", + "TopK", + "SpaceToDepth", + "SpaceToBatch", + "SparseToDense", + "ReverseSequence", + "Rank", + "Gather", + "GatherNd", + "Fill", + "Elu", + "DepthToSpace", + "BatchToSpace", + "AddN", + "Ceil", + "EmbeddingLookup", + "EmbeddingLookupSparse", + "FloorDiv", + "FloorMod", + "L2Norm", + "LocalResponseNormalization", + "MatrixDiag", + "Reduce", + "Reverse", + "Round", + "Select", + "Scatter", + "ScatterND", + "Unique", + "Unstack", + "LogicalAnd", + "LogicalOr", + "LogicalXor", + "LogicalNot", + "OnnxInt8Quantize", + "OnnxInt8Dequantize", + "FakeQuantWithMinMax", + "FakeQuantWithMinMaxPerChannel", + 
"BatchNormFold", + "MulFold", + "AddFold", + "SquaredDifference", + "Flatten", + "TupleGetItem", + "Div", + "Where", + "OneHot", + "Lstm", + "Conv2DGradFilter", + "Conv2DGradInput", + "PoolingGrad", + "BNGradInput", + "OptMomentum", + "BiasGrad", + "SoftmaxCrossEntropy", + "AddGrad", + "SubGrad", + "MulGrad", + "DivGrad", + "PowerGrad", + "ActivationGrad", + "PriorBox", + "SpaceToBatchND", + "TopKV2", + nullptr + }; + return names; +} + +inline const char *EnumNamePrimitiveType(PrimitiveType e) { + if (e < PrimitiveType_NONE || e > PrimitiveType_TopKV2) return ""; + const size_t index = static_cast(e); + return EnumNamesPrimitiveType()[index]; +} + +template struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NONE; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Concat; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SoftMax; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Activation; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Conv2D; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FusedBatchNorm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CaffeBatchNorm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BiasAdd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Pooling; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DepthwiseConv2D; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DeDepthwiseConv2D; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = 
PrimitiveType_Resize; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DetectionPostProcess; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FullConnection; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Mean; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DeConv2D; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Scale; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Reshape; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Eltwise; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NetOutput; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Add; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Sub; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_MatMul; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_StridedSlice; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Power; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Slice; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Stack; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Mul; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_RealDiv; +}; + +template<> struct PrimitiveTypeTraits { + static const 
PrimitiveType enum_value = PrimitiveType_Pad; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Maximum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Minimum; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_CaffePReLU; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LeakyReLU; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ArgMax; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ArgMin; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Exp; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Crop; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Range; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Rsqrt; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ExpandDims; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Tile; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Cast; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Shape; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Nchw2Nhwc; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Nhwc2Nchw; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_QuantDTypeCast; +}; + +template<> struct PrimitiveTypeTraits { + 
static const PrimitiveType enum_value = PrimitiveType_Split; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Permute; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FakeQuantWithMinMaxVars; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Equal; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Less; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Greater; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_NotEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LessEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GreaterEqual; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Min; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Floor; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Abs; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Neg; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Cos; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Sin; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Sqrt; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Square; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Constant; +}; + +template<> struct 
PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Log; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Tan; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Atan; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Asin; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Clip; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Transpose; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Squeeze; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Unsqueeze; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Upsample; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Dropout; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Broadcast; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BroadcastTo; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Lrn; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Prelu; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ZerosLike; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_TopK; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SpaceToDepth; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SpaceToBatch; +}; + 
+template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_SparseToDense; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_ReverseSequence; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Rank; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Gather; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_GatherNd; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Fill; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Elu; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_DepthToSpace; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_BatchToSpace; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_AddN; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Ceil; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_EmbeddingLookup; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_EmbeddingLookupSparse; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FloorDiv; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_FloorMod; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_L2Norm; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_LocalResponseNormalization; +}; + +template<> struct PrimitiveTypeTraits { + 
static const PrimitiveType enum_value = PrimitiveType_MatrixDiag; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Reduce; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Reverse; +}; + +template<> struct PrimitiveTypeTraits { + static const PrimitiveType enum_value = PrimitiveType_Round; +}; + +template<> struct PrimitiveTypeTraits() const { + return value_as_Select(); +} + +template<> inline const Scatter *Primitive::value_as() const { + return value_as_Scatter(); +} + +template<> inline const ScatterND *Primitive::value_as() const { + return value_as_ScatterND(); +} + +template<> inline const Unique *Primitive::value_as() const { + return value_as_Unique(); +} + +template<> inline const Unstack *Primitive::value_as() const { + return value_as_Unstack(); +} + +template<> inline const LogicalAnd *Primitive::value_as() const { + return value_as_LogicalAnd(); +} + +template<> inline const LogicalOr *Primitive::value_as() const { + return value_as_LogicalOr(); +} + +template<> inline const LogicalXor *Primitive::value_as() const { + return value_as_LogicalXor(); +} + +template<> inline const LogicalNot *Primitive::value_as() const { + return value_as_LogicalNot(); +} + +template<> inline const OnnxInt8Quantize *Primitive::value_as() const { + return value_as_OnnxInt8Quantize(); +} + +template<> inline const OnnxInt8Dequantize *Primitive::value_as() const { + return value_as_OnnxInt8Dequantize(); +} + +template<> inline const FakeQuantWithMinMax *Primitive::value_as() const { + return value_as_FakeQuantWithMinMax(); +} + +template<> inline const FakeQuantWithMinMaxPerChannel *Primitive::value_as() const { + return value_as_FakeQuantWithMinMaxPerChannel(); +} + +template<> inline const BatchNormFold *Primitive::value_as() const { + return value_as_BatchNormFold(); +} + +template<> inline const MulFold *Primitive::value_as() const { + return value_as_MulFold(); 
+} + +template<> inline const AddFold *Primitive::value_as() const { + return value_as_AddFold(); +} + +template<> inline const SquaredDifference *Primitive::value_as() const { + return value_as_SquaredDifference(); +} + +template<> inline const Flatten *Primitive::value_as() const { + return value_as_Flatten(); +} + +template<> inline const TupleGetItem *Primitive::value_as() const { + return value_as_TupleGetItem(); +} + +template<> inline const Div *Primitive::value_as
() const { + return value_as_Div(); +} + +template<> inline const Where *Primitive::value_as() const { + return value_as_Where(); +} + +template<> inline const OneHot *Primitive::value_as() const { + return value_as_OneHot(); +} + +template<> inline const Lstm *Primitive::value_as() const { + return value_as_Lstm(); +} + +template<> inline const Conv2DGradFilter *Primitive::value_as() const { + return value_as_Conv2DGradFilter(); +} + +template<> inline const Conv2DGradInput *Primitive::value_as() const { + return value_as_Conv2DGradInput(); +} + +template<> inline const PoolingGrad *Primitive::value_as() const { + return value_as_PoolingGrad(); +} + +template<> inline const BNGradInput *Primitive::value_as() const { + return value_as_BNGradInput(); +} + +template<> inline const OptMomentum *Primitive::value_as() const { + return value_as_OptMomentum(); +} + +template<> inline const BiasGrad *Primitive::value_as() const { + return value_as_BiasGrad(); +} + +template<> inline const SoftmaxCrossEntropy *Primitive::value_as() const { + return value_as_SoftmaxCrossEntropy(); +} + +template<> inline const AddGrad *Primitive::value_as() const { + return value_as_AddGrad(); +} + +template<> inline const SubGrad *Primitive::value_as() const { + return value_as_SubGrad(); +} + +template<> inline const MulGrad *Primitive::value_as() const { + return value_as_MulGrad(); +} + +template<> inline const DivGrad *Primitive::value_as() const { + return value_as_DivGrad(); +} + +template<> inline const PowerGrad *Primitive::value_as() const { + return value_as_PowerGrad(); +} + +template<> inline const ActivationGrad *Primitive::value_as() const { + return value_as_ActivationGrad(); +} + +template<> inline const PriorBox *Primitive::value_as() const { + return value_as_PriorBox(); +} + +template<> inline const SpaceToBatchND *Primitive::value_as() const { + return value_as_SpaceToBatchND(); +} + +template<> inline const TopKV2 *Primitive::value_as() const { + return 
value_as_TopKV2(); +} + +struct PrimitiveBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_value_type(PrimitiveType value_type) { + fbb_.AddElement(Primitive::VT_VALUE_TYPE, static_cast(value_type), 0); + } + void add_value(flatbuffers::Offset value) { + fbb_.AddOffset(Primitive::VT_VALUE, value); + } + explicit PrimitiveBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PrimitiveBuilder &operator=(const PrimitiveBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePrimitive( + flatbuffers::FlatBufferBuilder &_fbb, + PrimitiveType value_type = PrimitiveType_NONE, + flatbuffers::Offset value = 0) { + PrimitiveBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_value_type(value_type); + return builder_.Finish(); +} + +struct CNode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_NODETYPE = 6, + VT_PRIMITIVE = 8, + VT_INPUTINDEX = 10, + VT_OUTPUTINDEX = 12, + VT_QUANTTYPE = 14 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + NodeType nodeType() const { + return static_cast(GetField(VT_NODETYPE, 2)); + } + const Primitive *primitive() const { + return GetPointer(VT_PRIMITIVE); + } + const flatbuffers::Vector *inputIndex() const { + return GetPointer *>(VT_INPUTINDEX); + } + const flatbuffers::Vector *outputIndex() const { + return GetPointer *>(VT_OUTPUTINDEX); + } + QuantType quantType() const { + return static_cast(GetField(VT_QUANTTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_NODETYPE) && + VerifyOffset(verifier, VT_PRIMITIVE) && + 
verifier.VerifyTable(primitive()) && + VerifyOffset(verifier, VT_INPUTINDEX) && + verifier.VerifyVector(inputIndex()) && + VerifyOffset(verifier, VT_OUTPUTINDEX) && + verifier.VerifyVector(outputIndex()) && + VerifyField(verifier, VT_QUANTTYPE) && + verifier.EndTable(); + } +}; + +struct CNodeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(CNode::VT_NAME, name); + } + void add_nodeType(NodeType nodeType) { + fbb_.AddElement(CNode::VT_NODETYPE, static_cast(nodeType), 2); + } + void add_primitive(flatbuffers::Offset primitive) { + fbb_.AddOffset(CNode::VT_PRIMITIVE, primitive); + } + void add_inputIndex(flatbuffers::Offset> inputIndex) { + fbb_.AddOffset(CNode::VT_INPUTINDEX, inputIndex); + } + void add_outputIndex(flatbuffers::Offset> outputIndex) { + fbb_.AddOffset(CNode::VT_OUTPUTINDEX, outputIndex); + } + void add_quantType(QuantType quantType) { + fbb_.AddElement(CNode::VT_QUANTTYPE, static_cast(quantType), 0); + } + explicit CNodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CNodeBuilder &operator=(const CNodeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCNode( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + NodeType nodeType = NodeType_CNode, + flatbuffers::Offset primitive = 0, + flatbuffers::Offset> inputIndex = 0, + flatbuffers::Offset> outputIndex = 0, + QuantType quantType = QuantType_QUANT_NONE) { + CNodeBuilder builder_(_fbb); + builder_.add_quantType(quantType); + builder_.add_outputIndex(outputIndex); + builder_.add_inputIndex(inputIndex); + builder_.add_primitive(primitive); + builder_.add_nodeType(nodeType); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCNodeDirect( + flatbuffers::FlatBufferBuilder 
&_fbb, + const char *name = nullptr, + NodeType nodeType = NodeType_CNode, + flatbuffers::Offset primitive = 0, + const std::vector *inputIndex = nullptr, + const std::vector *outputIndex = nullptr, + QuantType quantType = QuantType_QUANT_NONE) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto inputIndex__ = inputIndex ? _fbb.CreateVector(*inputIndex) : 0; + auto outputIndex__ = outputIndex ? _fbb.CreateVector(*outputIndex) : 0; + return mindspore::schema::CreateCNode( + _fbb, + name__, + nodeType, + primitive, + inputIndex__, + outputIndex__, + quantType); +} + +struct MetaGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_FMKTYPE = 6, + VT_INPUTINDEX = 8, + VT_OUTPUTINDEX = 10, + VT_MEMPOOLSIZE = 12, + VT_NODES = 14, + VT_ALLTENSORS = 16 + }; + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + int32_t fmkType() const { + return GetField(VT_FMKTYPE, 0); + } + const flatbuffers::Vector *inputIndex() const { + return GetPointer *>(VT_INPUTINDEX); + } + const flatbuffers::Vector *outputIndex() const { + return GetPointer *>(VT_OUTPUTINDEX); + } + uint32_t mempoolSize() const { + return GetField(VT_MEMPOOLSIZE, 0); + } + const flatbuffers::Vector> *nodes() const { + return GetPointer> *>(VT_NODES); + } + const flatbuffers::Vector> *allTensors() const { + return GetPointer> *>(VT_ALLTENSORS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_FMKTYPE) && + VerifyOffset(verifier, VT_INPUTINDEX) && + verifier.VerifyVector(inputIndex()) && + VerifyOffset(verifier, VT_OUTPUTINDEX) && + verifier.VerifyVector(outputIndex()) && + VerifyField(verifier, VT_MEMPOOLSIZE) && + VerifyOffset(verifier, VT_NODES) && + verifier.VerifyVector(nodes()) && + verifier.VerifyVectorOfTables(nodes()) && + 
VerifyOffset(verifier, VT_ALLTENSORS) && + verifier.VerifyVector(allTensors()) && + verifier.VerifyVectorOfTables(allTensors()) && + verifier.EndTable(); + } +}; + +struct MetaGraphBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(MetaGraph::VT_NAME, name); + } + void add_fmkType(int32_t fmkType) { + fbb_.AddElement(MetaGraph::VT_FMKTYPE, fmkType, 0); + } + void add_inputIndex(flatbuffers::Offset> inputIndex) { + fbb_.AddOffset(MetaGraph::VT_INPUTINDEX, inputIndex); + } + void add_outputIndex(flatbuffers::Offset> outputIndex) { + fbb_.AddOffset(MetaGraph::VT_OUTPUTINDEX, outputIndex); + } + void add_mempoolSize(uint32_t mempoolSize) { + fbb_.AddElement(MetaGraph::VT_MEMPOOLSIZE, mempoolSize, 0); + } + void add_nodes(flatbuffers::Offset>> nodes) { + fbb_.AddOffset(MetaGraph::VT_NODES, nodes); + } + void add_allTensors(flatbuffers::Offset>> allTensors) { + fbb_.AddOffset(MetaGraph::VT_ALLTENSORS, allTensors); + } + explicit MetaGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MetaGraphBuilder &operator=(const MetaGraphBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMetaGraph( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + int32_t fmkType = 0, + flatbuffers::Offset> inputIndex = 0, + flatbuffers::Offset> outputIndex = 0, + uint32_t mempoolSize = 0, + flatbuffers::Offset>> nodes = 0, + flatbuffers::Offset>> allTensors = 0) { + MetaGraphBuilder builder_(_fbb); + builder_.add_allTensors(allTensors); + builder_.add_nodes(nodes); + builder_.add_mempoolSize(mempoolSize); + builder_.add_outputIndex(outputIndex); + builder_.add_inputIndex(inputIndex); + builder_.add_fmkType(fmkType); + builder_.add_name(name); + return builder_.Finish(); +} + +inline flatbuffers::Offset 
CreateMetaGraphDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + int32_t fmkType = 0, + const std::vector *inputIndex = nullptr, + const std::vector *outputIndex = nullptr, + uint32_t mempoolSize = 0, + const std::vector> *nodes = nullptr, + const std::vector> *allTensors = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto inputIndex__ = inputIndex ? _fbb.CreateVector(*inputIndex) : 0; + auto outputIndex__ = outputIndex ? _fbb.CreateVector(*outputIndex) : 0; + auto nodes__ = nodes ? _fbb.CreateVector>(*nodes) : 0; + auto allTensors__ = allTensors ? _fbb.CreateVector>(*allTensors) : 0; + return mindspore::schema::CreateMetaGraph( + _fbb, + name__, + fmkType, + inputIndex__, + outputIndex__, + mempoolSize, + nodes__, + allTensors__); +} + +inline bool VerifyPrimitiveType(flatbuffers::Verifier &verifier, const void *obj, PrimitiveType type) { + switch (type) { + case PrimitiveType_NONE: { + return true; + } + case PrimitiveType_Concat: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SoftMax: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Activation: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Conv2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FusedBatchNorm: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_CaffeBatchNorm: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BiasAdd: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Pooling: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_DepthwiseConv2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
PrimitiveType_DeDepthwiseConv2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Resize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_DetectionPostProcess: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FullConnection: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Mean: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_DeConv2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Scale: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Reshape: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Eltwise: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_NetOutput: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Add: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Sub: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_MatMul: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_StridedSlice: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Power: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Slice: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Stack: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Mul: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_RealDiv: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Pad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Maximum: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Minimum: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_CaffePReLU: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LeakyReLU: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ArgMax: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ArgMin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Exp: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Crop: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Range: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Rsqrt: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ExpandDims: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Tile: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Cast: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Shape: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Nchw2Nhwc: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Nhwc2Nchw: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_QuantDTypeCast: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + 
} + case PrimitiveType_Split: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Permute: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FakeQuantWithMinMaxVars: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Equal: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Less: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Greater: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_NotEqual: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LessEqual: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_GreaterEqual: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Min: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Floor: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Abs: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Neg: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Cos: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Sin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Sqrt: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Square: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Constant: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Log: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Tan: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Atan: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Asin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Clip: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Transpose: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Squeeze: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Unsqueeze: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Upsample: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Dropout: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Broadcast: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BroadcastTo: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Lrn: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Prelu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ZerosLike: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_TopK: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SpaceToDepth: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SpaceToBatch: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SparseToDense: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case PrimitiveType_ReverseSequence: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Rank: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Gather: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_GatherNd: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Fill: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Elu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_DepthToSpace: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BatchToSpace: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_AddN: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Ceil: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_EmbeddingLookup: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_EmbeddingLookupSparse: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FloorDiv: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FloorMod: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_L2Norm: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LocalResponseNormalization: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_MatrixDiag: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Reduce: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case PrimitiveType_Reverse: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Round: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Select: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Scatter: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ScatterND: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Unique: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Unstack: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LogicalAnd: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LogicalOr: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LogicalXor: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_LogicalNot: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_OnnxInt8Quantize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_OnnxInt8Dequantize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FakeQuantWithMinMax: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_FakeQuantWithMinMaxPerChannel: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BatchNormFold: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_MulFold: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_AddFold: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SquaredDifference: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Flatten: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_TupleGetItem: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Div: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Where: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_OneHot: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Lstm: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Conv2DGradFilter: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_Conv2DGradInput: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_PoolingGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BNGradInput: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_OptMomentum: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_BiasGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SoftmaxCrossEntropy: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_AddGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SubGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_MulGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_DivGrad: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_PowerGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_ActivationGrad: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_PriorBox: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_SpaceToBatchND: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case PrimitiveType_TopKV2: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return false; + } +} + +inline bool VerifyPrimitiveTypeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyPrimitiveType( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline const mindspore::schema::MetaGraph *GetMetaGraph(const void *buf) { + return flatbuffers::GetRoot(buf); +} + +inline const mindspore::schema::MetaGraph *GetSizePrefixedMetaGraph(const void *buf) { + return flatbuffers::GetSizePrefixedRoot(buf); +} + +inline bool VerifyMetaGraphBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(nullptr); +} + +inline bool VerifySizePrefixedMetaGraphBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(nullptr); +} + +inline void FinishMetaGraphBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.Finish(root); +} + +inline void FinishSizePrefixedMetaGraphBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root); +} + +} // namespace schema +} // namespace mindspore + +#endif // 
FLATBUFFERS_GENERATED_MODEL_MINDSPORE_SCHEMA_H_ diff --git a/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/ops_generated.h b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/ops_generated.h new file mode 100644 index 0000000000000000000000000000000000000000..66a0e5f0597ae0dc942fc21b86998fa70af37657 --- /dev/null +++ b/model_zoo/official/lite/app/src/main/cpp/include/MindSpore/schema/ops_generated.h @@ -0,0 +1,8431 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_OPS_MINDSPORE_SCHEMA_H_ +#define FLATBUFFERS_GENERATED_OPS_MINDSPORE_SCHEMA_H_ + +#include "flatbuffers/flatbuffers.h" + +namespace mindspore { +namespace schema { + +struct Pad; + +struct Maximum; + +struct Minimum; + +struct Flatten; + +struct Concat; + +struct SoftMax; + +struct Activation; + +struct ActivationGrad; + +struct Conv2D; + +struct Conv2DGradFilter; + +struct Conv2DGradInput; + +struct FusedBatchNorm; + +struct CaffeBatchNorm; + +struct BiasGrad; + +struct SoftmaxCrossEntropy; + +struct PoolingGrad; + +struct Shape; + +struct Nchw2Nhwc; + +struct Nhwc2Nchw; + +struct FakeQuantWithMinMaxVars; + +struct BiasAdd; + +struct Pooling; + +struct DepthwiseConv2D; + +struct DeDepthwiseConv2D; + +struct Resize; + +struct DetectionPostProcess; + +struct FullConnection; + +struct Mean; + +struct DeConv2D; + +struct BNGradInput; + +struct Scale; + +struct Eltwise; + +struct Add; + +struct Sub; + +struct Mul; + +struct Div; + +struct AddGrad; + +struct SubGrad; + +struct MulGrad; + +struct DivGrad; + +struct RealDiv; + +struct Rsqrt; + +struct Equal; + +struct Less; + +struct Greater; + +struct NotEqual; + +struct LessEqual; + +struct GreaterEqual; + +struct Min; + +struct Slice; + +struct Floor; + +struct Abs; + +struct Neg; + +struct Exp; + +struct Cos; + +struct Sin; + +struct Sqrt; + +struct Square; + +struct Ceil; + +struct Log; + +struct Tan; + +struct Atan; + +struct Asin; + +struct Reshape; + +struct 
Power; + +struct PowerGrad; + +struct ArgMax; + +struct ArgMin; + +struct NetOutput; + +struct MatMul; + +struct CaffePReLU; + +struct LeakyReLU; + +struct StridedSlice; + +struct Stack; + +struct Range; + +struct ExpandDims; + +struct Tile; + +struct Cast; + +struct QuantDTypeCast; + +struct Split; + +struct Crop; + +struct Permute; + +struct Clip; + +struct Constant; + +struct Elu; + +struct Broadcast; + +struct BroadcastTo; + +struct Lrn; + +struct Reduce; + +struct Prelu; + +struct Transpose; + +struct Squeeze; + +struct Unsqueeze; + +struct Upsample; + +struct Dropout; + +struct LocalResponseNormalization; + +struct ZerosLike; + +struct TopK; + +struct SpaceToDepth; + +struct SpaceToBatch; + +struct SparseToDense; + +struct ReverseSequence; + +struct Rank; + +struct Gather; + +struct GatherNd; + +struct Fill; + +struct DepthToSpace; + +struct BatchToSpace; + +struct AddN; + +struct EmbeddingLookup; + +struct EmbeddingLookupSparse; + +struct FloorDiv; + +struct FloorMod; + +struct L2Norm; + +struct LogicalAnd; + +struct LogicalOr; + +struct LogicalXor; + +struct LogicalNot; + +struct MatrixDiag; + +struct Select; + +struct TfReduce; + +struct Reverse; + +struct Round; + +struct Scatter; + +struct ScatterND; + +struct Unique; + +struct Unstack; + +struct OnnxInt8Quantize; + +struct OnnxInt8Dequantize; + +struct FakeQuantWithMinMax; + +struct FakeQuantWithMinMaxPerChannel; + +struct BatchNormFold; + +struct MulFold; + +struct AddFold; + +struct SquaredDifference; + +struct TupleGetItem; + +struct OptMomentum; + +struct Where; + +struct OneHot; + +struct Lstm; + +struct PriorBox; + +struct SpaceToBatchND; + +struct TopKV2; + +enum ResizeMethod { + ResizeMethod_UNKNOW = -1, + ResizeMethod_BILINEAR = 0, + ResizeMethod_NEAREST_NEIGHBOR = 1, + ResizeMethod_MIN = ResizeMethod_UNKNOW, + ResizeMethod_MAX = ResizeMethod_NEAREST_NEIGHBOR +}; + +inline const ResizeMethod (&EnumValuesResizeMethod())[3] { + static const ResizeMethod values[] = { + ResizeMethod_UNKNOW, + 
ResizeMethod_BILINEAR, + ResizeMethod_NEAREST_NEIGHBOR + }; + return values; +} + +inline const char * const *EnumNamesResizeMethod() { + static const char * const names[] = { + "UNKNOW", + "BILINEAR", + "NEAREST_NEIGHBOR", + nullptr + }; + return names; +} + +inline const char *EnumNameResizeMethod(ResizeMethod e) { + if (e < ResizeMethod_UNKNOW || e > ResizeMethod_NEAREST_NEIGHBOR) return ""; + const size_t index = static_cast(e) - static_cast(ResizeMethod_UNKNOW); + return EnumNamesResizeMethod()[index]; +} + +enum Format { + Format_NCHW = 0, + Format_NHWC = 1, + Format_NHWC4 = 2, + Format_HWKC = 3, + Format_HWCK = 4, + Format_KCHW = 5, + Format_CKHW = 6, + Format_KHWC = 7, + Format_CHWK = 8, + Format_NC4HW4 = 100, + Format_NUM_OF_FORMAT = 101, + Format_MIN = Format_NCHW, + Format_MAX = Format_NUM_OF_FORMAT +}; + +inline const Format (&EnumValuesFormat())[11] { + static const Format values[] = { + Format_NCHW, + Format_NHWC, + Format_NHWC4, + Format_HWKC, + Format_HWCK, + Format_KCHW, + Format_CKHW, + Format_KHWC, + Format_CHWK, + Format_NC4HW4, + Format_NUM_OF_FORMAT + }; + return values; +} + +inline const char *EnumNameFormat(Format e) { + switch (e) { + case Format_NCHW: return "NCHW"; + case Format_NHWC: return "NHWC"; + case Format_NHWC4: return "NHWC4"; + case Format_HWKC: return "HWKC"; + case Format_HWCK: return "HWCK"; + case Format_KCHW: return "KCHW"; + case Format_CKHW: return "CKHW"; + case Format_KHWC: return "KHWC"; + case Format_CHWK: return "CHWK"; + case Format_NC4HW4: return "NC4HW4"; + case Format_NUM_OF_FORMAT: return "NUM_OF_FORMAT"; + default: return ""; + } +} + +enum ActivationType { + ActivationType_NO_ACTIVATION = 0, + ActivationType_RELU = 1, + ActivationType_SIGMOID = 2, + ActivationType_RELU6 = 3, + ActivationType_ELU = 4, + ActivationType_LEAKY_RELU = 5, + ActivationType_ABS = 6, + ActivationType_RELU1 = 7, + ActivationType_SOFTSIGN = 8, + ActivationType_SOFTPLUS = 9, + ActivationType_TANH = 10, + ActivationType_SELU = 11, + 
ActivationType_HSWISH = 12, + ActivationType_HSIGMOID = 13, + ActivationType_THRESHOLDRELU = 14, + ActivationType_LINEAR = 15, + ActivationType_UNKNOW = 16, + ActivationType_MIN = ActivationType_NO_ACTIVATION, + ActivationType_MAX = ActivationType_UNKNOW +}; + +inline const ActivationType (&EnumValuesActivationType())[17] { + static const ActivationType values[] = { + ActivationType_NO_ACTIVATION, + ActivationType_RELU, + ActivationType_SIGMOID, + ActivationType_RELU6, + ActivationType_ELU, + ActivationType_LEAKY_RELU, + ActivationType_ABS, + ActivationType_RELU1, + ActivationType_SOFTSIGN, + ActivationType_SOFTPLUS, + ActivationType_TANH, + ActivationType_SELU, + ActivationType_HSWISH, + ActivationType_HSIGMOID, + ActivationType_THRESHOLDRELU, + ActivationType_LINEAR, + ActivationType_UNKNOW + }; + return values; +} + +inline const char * const *EnumNamesActivationType() { + static const char * const names[] = { + "NO_ACTIVATION", + "RELU", + "SIGMOID", + "RELU6", + "ELU", + "LEAKY_RELU", + "ABS", + "RELU1", + "SOFTSIGN", + "SOFTPLUS", + "TANH", + "SELU", + "HSWISH", + "HSIGMOID", + "THRESHOLDRELU", + "LINEAR", + "UNKNOW", + nullptr + }; + return names; +} + +inline const char *EnumNameActivationType(ActivationType e) { + if (e < ActivationType_NO_ACTIVATION || e > ActivationType_UNKNOW) return ""; + const size_t index = static_cast(e); + return EnumNamesActivationType()[index]; +} + +enum ActivationGradType { + ActivationGradType_NO_ACTIVATION = 0, + ActivationGradType_RELU = 1, + ActivationGradType_SIGMOID = 2, + ActivationGradType_RELU6 = 3, + ActivationGradType_ELU = 4, + ActivationGradType_LEAKY_RELU = 5, + ActivationGradType_ABS = 6, + ActivationGradType_RELU1 = 7, + ActivationGradType_SOFTSIGN = 8, + ActivationGradType_SOFTPLUS = 9, + ActivationGradType_TANH = 10, + ActivationGradType_SELU = 11, + ActivationGradType_HSWISH = 12, + ActivationGradType_HSIGMOID = 13, + ActivationGradType_THRESHOLDRELU = 14, + ActivationGradType_LINEAR = 15, + 
ActivationGradType_UNKNOW = 16, + ActivationGradType_MIN = ActivationGradType_NO_ACTIVATION, + ActivationGradType_MAX = ActivationGradType_UNKNOW +}; + +inline const ActivationGradType (&EnumValuesActivationGradType())[17] { + static const ActivationGradType values[] = { + ActivationGradType_NO_ACTIVATION, + ActivationGradType_RELU, + ActivationGradType_SIGMOID, + ActivationGradType_RELU6, + ActivationGradType_ELU, + ActivationGradType_LEAKY_RELU, + ActivationGradType_ABS, + ActivationGradType_RELU1, + ActivationGradType_SOFTSIGN, + ActivationGradType_SOFTPLUS, + ActivationGradType_TANH, + ActivationGradType_SELU, + ActivationGradType_HSWISH, + ActivationGradType_HSIGMOID, + ActivationGradType_THRESHOLDRELU, + ActivationGradType_LINEAR, + ActivationGradType_UNKNOW + }; + return values; +} + +inline const char * const *EnumNamesActivationGradType() { + static const char * const names[] = { + "NO_ACTIVATION", + "RELU", + "SIGMOID", + "RELU6", + "ELU", + "LEAKY_RELU", + "ABS", + "RELU1", + "SOFTSIGN", + "SOFTPLUS", + "TANH", + "SELU", + "HSWISH", + "HSIGMOID", + "THRESHOLDRELU", + "LINEAR", + "UNKNOW", + nullptr + }; + return names; +} + +inline const char *EnumNameActivationGradType(ActivationGradType e) { + if (e < ActivationGradType_NO_ACTIVATION || e > ActivationGradType_UNKNOW) return ""; + const size_t index = static_cast(e); + return EnumNamesActivationGradType()[index]; +} + +enum ReduceType { + ReduceType_REDUCE_MAX = 0, + ReduceType_REDUCE_MEAN = 1, + ReduceType_REDUCE_ALL = 2, + ReduceType_REDUCE_ANY = 3, + ReduceType_REDUCE_LOG_SUM_EXP = 4, + ReduceType_REDUCE_PROD = 5, + ReduceType_REDUCE_SUM = 6, + ReduceType_UNKNOW = 7, + ReduceType_MIN = ReduceType_REDUCE_MAX, + ReduceType_MAX = ReduceType_UNKNOW +}; + +inline const ReduceType (&EnumValuesReduceType())[8] { + static const ReduceType values[] = { + ReduceType_REDUCE_MAX, + ReduceType_REDUCE_MEAN, + ReduceType_REDUCE_ALL, + ReduceType_REDUCE_ANY, + ReduceType_REDUCE_LOG_SUM_EXP, + ReduceType_REDUCE_PROD, 
+ ReduceType_REDUCE_SUM, + ReduceType_UNKNOW + }; + return values; +} + +inline const char * const *EnumNamesReduceType() { + static const char * const names[] = { + "REDUCE_MAX", + "REDUCE_MEAN", + "REDUCE_ALL", + "REDUCE_ANY", + "REDUCE_LOG_SUM_EXP", + "REDUCE_PROD", + "REDUCE_SUM", + "UNKNOW", + nullptr + }; + return names; +} + +inline const char *EnumNameReduceType(ReduceType e) { + if (e < ReduceType_REDUCE_MAX || e > ReduceType_UNKNOW) return ""; + const size_t index = static_cast(e); + return EnumNamesReduceType()[index]; +} + +enum PoolMode { + PoolMode_MAX_POOLING = 0, + PoolMode_MEAN_POOLING = 1, + PoolMode_MIN = PoolMode_MAX_POOLING, + PoolMode_MAX = PoolMode_MEAN_POOLING +}; + +inline const PoolMode (&EnumValuesPoolMode())[2] { + static const PoolMode values[] = { + PoolMode_MAX_POOLING, + PoolMode_MEAN_POOLING + }; + return values; +} + +inline const char * const *EnumNamesPoolMode() { + static const char * const names[] = { + "MAX_POOLING", + "MEAN_POOLING", + nullptr + }; + return names; +} + +inline const char *EnumNamePoolMode(PoolMode e) { + if (e < PoolMode_MAX_POOLING || e > PoolMode_MEAN_POOLING) return ""; + const size_t index = static_cast(e); + return EnumNamesPoolMode()[index]; +} + +enum EltwiseMode { + EltwiseMode_PROD = 0, + EltwiseMode_SUM = 1, + EltwiseMode_MAXIMUM = 2, + EltwiseMode_UNKNOW = 3, + EltwiseMode_MIN = EltwiseMode_PROD, + EltwiseMode_MAX = EltwiseMode_UNKNOW +}; + +inline const EltwiseMode (&EnumValuesEltwiseMode())[4] { + static const EltwiseMode values[] = { + EltwiseMode_PROD, + EltwiseMode_SUM, + EltwiseMode_MAXIMUM, + EltwiseMode_UNKNOW + }; + return values; +} + +inline const char * const *EnumNamesEltwiseMode() { + static const char * const names[] = { + "PROD", + "SUM", + "MAXIMUM", + "UNKNOW", + nullptr + }; + return names; +} + +inline const char *EnumNameEltwiseMode(EltwiseMode e) { + if (e < EltwiseMode_PROD || e > EltwiseMode_UNKNOW) return ""; + const size_t index = static_cast(e); + return 
EnumNamesEltwiseMode()[index]; +} + +enum PadMode { + PadMode_NOTSET = 0, + PadMode_SAME = 1, + PadMode_VALID = 2, + PadMode_CAFFE = 4, + PadMode_MIN = PadMode_NOTSET, + PadMode_MAX = PadMode_CAFFE +}; + +inline const PadMode (&EnumValuesPadMode())[4] { + static const PadMode values[] = { + PadMode_NOTSET, + PadMode_SAME, + PadMode_VALID, + PadMode_CAFFE + }; + return values; +} + +inline const char * const *EnumNamesPadMode() { + static const char * const names[] = { + "NOTSET", + "SAME", + "VALID", + "", + "CAFFE", + nullptr + }; + return names; +} + +inline const char *EnumNamePadMode(PadMode e) { + if (e < PadMode_NOTSET || e > PadMode_CAFFE) return ""; + const size_t index = static_cast(e); + return EnumNamesPadMode()[index]; +} + +enum RoundMode { + RoundMode_FLOOR = 0, + RoundMode_CEIL = 1, + RoundMode_MIN = RoundMode_FLOOR, + RoundMode_MAX = RoundMode_CEIL +}; + +inline const RoundMode (&EnumValuesRoundMode())[2] { + static const RoundMode values[] = { + RoundMode_FLOOR, + RoundMode_CEIL + }; + return values; +} + +inline const char * const *EnumNamesRoundMode() { + static const char * const names[] = { + "FLOOR", + "CEIL", + nullptr + }; + return names; +} + +inline const char *EnumNameRoundMode(RoundMode e) { + if (e < RoundMode_FLOOR || e > RoundMode_CEIL) return ""; + const size_t index = static_cast(e); + return EnumNamesRoundMode()[index]; +} + +enum PaddingMode { + PaddingMode_CONSTANT = 0, + PaddingMode_REFLECT = 1, + PaddingMode_SYMMETRIC = 2, + PaddingMode_MODE_RESERVED = 3, + PaddingMode_MIN = PaddingMode_CONSTANT, + PaddingMode_MAX = PaddingMode_MODE_RESERVED +}; + +inline const PaddingMode (&EnumValuesPaddingMode())[4] { + static const PaddingMode values[] = { + PaddingMode_CONSTANT, + PaddingMode_REFLECT, + PaddingMode_SYMMETRIC, + PaddingMode_MODE_RESERVED + }; + return values; +} + +inline const char * const *EnumNamesPaddingMode() { + static const char * const names[] = { + "CONSTANT", + "REFLECT", + "SYMMETRIC", + "MODE_RESERVED", + 
nullptr + }; + return names; +} + +inline const char *EnumNamePaddingMode(PaddingMode e) { + if (e < PaddingMode_CONSTANT || e > PaddingMode_MODE_RESERVED) return ""; + const size_t index = static_cast(e); + return EnumNamesPaddingMode()[index]; +} + +enum ReduceMode { + ReduceMode_ReduceMean = 0, + ReduceMode_ReduceMax = 1, + ReduceMode_ReduceMin = 2, + ReduceMode_ReduceProd = 3, + ReduceMode_ReduceSum = 4, + ReduceMode_ReduceSumSquare = 5, + ReduceMode_MIN = ReduceMode_ReduceMean, + ReduceMode_MAX = ReduceMode_ReduceSumSquare +}; + +inline const ReduceMode (&EnumValuesReduceMode())[6] { + static const ReduceMode values[] = { + ReduceMode_ReduceMean, + ReduceMode_ReduceMax, + ReduceMode_ReduceMin, + ReduceMode_ReduceProd, + ReduceMode_ReduceSum, + ReduceMode_ReduceSumSquare + }; + return values; +} + +inline const char * const *EnumNamesReduceMode() { + static const char * const names[] = { + "ReduceMean", + "ReduceMax", + "ReduceMin", + "ReduceProd", + "ReduceSum", + "ReduceSumSquare", + nullptr + }; + return names; +} + +inline const char *EnumNameReduceMode(ReduceMode e) { + if (e < ReduceMode_ReduceMean || e > ReduceMode_ReduceSumSquare) return ""; + const size_t index = static_cast(e); + return EnumNamesReduceMode()[index]; +} + +struct Pad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDINGS = 4, + VT_PADDINGMODE = 6, + VT_CONSTANTVALUE = 8 + }; + const flatbuffers::Vector *paddings() const { + return GetPointer *>(VT_PADDINGS); + } + PaddingMode paddingMode() const { + return static_cast(GetField(VT_PADDINGMODE, 0)); + } + float constantValue() const { + return GetField(VT_CONSTANTVALUE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_PADDINGS) && + verifier.VerifyVector(paddings()) && + VerifyField(verifier, VT_PADDINGMODE) && + VerifyField(verifier, VT_CONSTANTVALUE) && + 
verifier.EndTable(); + } +}; + +struct PadBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_paddings(flatbuffers::Offset> paddings) { + fbb_.AddOffset(Pad::VT_PADDINGS, paddings); + } + void add_paddingMode(PaddingMode paddingMode) { + fbb_.AddElement(Pad::VT_PADDINGMODE, static_cast(paddingMode), 0); + } + void add_constantValue(float constantValue) { + fbb_.AddElement(Pad::VT_CONSTANTVALUE, constantValue, 0.0f); + } + explicit PadBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PadBuilder &operator=(const PadBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePad( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> paddings = 0, + PaddingMode paddingMode = PaddingMode_CONSTANT, + float constantValue = 0.0f) { + PadBuilder builder_(_fbb); + builder_.add_constantValue(constantValue); + builder_.add_paddings(paddings); + builder_.add_paddingMode(paddingMode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePadDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *paddings = nullptr, + PaddingMode paddingMode = PaddingMode_CONSTANT, + float constantValue = 0.0f) { + auto paddings__ = paddings ? 
_fbb.CreateVector(*paddings) : 0; + return mindspore::schema::CreatePad( + _fbb, + paddings__, + paddingMode, + constantValue); +} + +struct Maximum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MaximumBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MaximumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MaximumBuilder &operator=(const MaximumBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaximum( + flatbuffers::FlatBufferBuilder &_fbb) { + MaximumBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Minimum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MinimumBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MinimumBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MinimumBuilder &operator=(const MinimumBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMinimum( + flatbuffers::FlatBufferBuilder &_fbb) { + MinimumBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Flatten FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FlattenBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FlattenBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : 
fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FlattenBuilder &operator=(const FlattenBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFlatten( + flatbuffers::FlatBufferBuilder &_fbb) { + FlattenBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Concat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_N = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t n() const { + return GetField(VT_N, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_N) && + verifier.EndTable(); + } +}; + +struct ConcatBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Concat::VT_AXIS, axis, 0); + } + void add_n(int32_t n) { + fbb_.AddElement(Concat::VT_N, n, 0); + } + explicit ConcatBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ConcatBuilder &operator=(const ConcatBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConcat( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t n = 0) { + ConcatBuilder builder_(_fbb); + builder_.add_n(n); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct SoftMax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + 
verifier.EndTable(); + } +}; + +struct SoftMaxBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(SoftMax::VT_AXIS, axis, 0); + } + explicit SoftMaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SoftMaxBuilder &operator=(const SoftMaxBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSoftMax( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) { + SoftMaxBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct Activation FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + ActivationType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } +}; + +struct ActivationBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(ActivationType type) { + fbb_.AddElement(Activation::VT_TYPE, static_cast(type), 0); + } + explicit ActivationBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ActivationBuilder &operator=(const ActivationBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateActivation( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationType type = ActivationType_NO_ACTIVATION) { + ActivationBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +struct ActivationGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + ActivationGradType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } +}; + +struct ActivationGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(ActivationGradType type) { + fbb_.AddElement(ActivationGrad::VT_TYPE, static_cast(type), 0); + } + explicit ActivationGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ActivationGradBuilder &operator=(const ActivationGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateActivationGrad( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationGradType type = ActivationGradType_NO_ACTIVATION) { + ActivationGradBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +struct Conv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_GROUP = 6, + VT_CHANNELIN = 8, + VT_CHANNELOUT = 10, + VT_KERNELW = 12, + VT_KERNELH = 14, + VT_STRIDEW = 16, + VT_STRIDEH = 18, + VT_PADMODE = 20, + VT_PADUP = 22, + VT_PADDOWN = 24, + VT_PADLEFT = 26, + VT_PADRIGHT = 28, + VT_DILATEW = 30, + VT_DILATEH = 32, + VT_HASBIAS = 34, + VT_ACTIVATIONTYPE = 36 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t group() const { + return GetField(VT_GROUP, 0); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelOut() const { + return GetField(VT_CHANNELOUT, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() 
const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, VT_CHANNELOUT) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Conv2D::VT_FORMAT, static_cast(format), 0); + } + void add_group(int32_t group) { + fbb_.AddElement(Conv2D::VT_GROUP, group, 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(Conv2D::VT_CHANNELIN, channelIn, 0); + } + void add_channelOut(int32_t channelOut) { + 
fbb_.AddElement(Conv2D::VT_CHANNELOUT, channelOut, 0); + } + void add_kernelW(int32_t kernelW) { + fbb_.AddElement(Conv2D::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + fbb_.AddElement(Conv2D::VT_KERNELH, kernelH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(Conv2D::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(Conv2D::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Conv2D::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(Conv2D::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(Conv2D::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(Conv2D::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(Conv2D::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + fbb_.AddElement(Conv2D::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(Conv2D::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(Conv2D::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(Conv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit Conv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Conv2DBuilder &operator=(const Conv2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2D( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t group = 0, + int32_t channelIn = 0, + int32_t channelOut = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + 
int32_t padLeft = 0, + int32_t padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = false, + ActivationType activationType = ActivationType_NO_ACTIVATION) { + Conv2DBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelOut(channelOut); + builder_.add_channelIn(channelIn); + builder_.add_group(group); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct Conv2DGradFilter FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_GROUP = 6, + VT_CHANNELIN = 8, + VT_CHANNELOUT = 10, + VT_KERNELW = 12, + VT_KERNELH = 14, + VT_STRIDEW = 16, + VT_STRIDEH = 18, + VT_PADMODE = 20, + VT_PADUP = 22, + VT_PADDOWN = 24, + VT_PADLEFT = 26, + VT_PADRIGHT = 28, + VT_DILATEW = 30, + VT_DILATEH = 32, + VT_HASBIAS = 34, + VT_ACTIVATIONTYPE = 36 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t group() const { + return GetField(VT_GROUP, 0); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelOut() const { + return GetField(VT_CHANNELOUT, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t 
padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, VT_CHANNELOUT) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DGradFilterBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Conv2DGradFilter::VT_FORMAT, static_cast(format), 0); + } + void add_group(int32_t group) { + fbb_.AddElement(Conv2DGradFilter::VT_GROUP, group, 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(Conv2DGradFilter::VT_CHANNELIN, channelIn, 0); + } + void add_channelOut(int32_t channelOut) { + fbb_.AddElement(Conv2DGradFilter::VT_CHANNELOUT, channelOut, 0); + } + void add_kernelW(int32_t kernelW) { + fbb_.AddElement(Conv2DGradFilter::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + 
fbb_.AddElement(Conv2DGradFilter::VT_KERNELH, kernelH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(Conv2DGradFilter::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(Conv2DGradFilter::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Conv2DGradFilter::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(Conv2DGradFilter::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(Conv2DGradFilter::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(Conv2DGradFilter::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(Conv2DGradFilter::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + fbb_.AddElement(Conv2DGradFilter::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(Conv2DGradFilter::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(Conv2DGradFilter::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(Conv2DGradFilter::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit Conv2DGradFilterBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Conv2DGradFilterBuilder &operator=(const Conv2DGradFilterBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DGradFilter( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t group = 0, + int32_t channelIn = 0, + int32_t channelOut = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t 
padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = false, + ActivationType activationType = ActivationType_NO_ACTIVATION) { + Conv2DGradFilterBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelOut(channelOut); + builder_.add_channelIn(channelIn); + builder_.add_group(group); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct Conv2DGradInput FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_GROUP = 6, + VT_CHANNELIN = 8, + VT_CHANNELOUT = 10, + VT_KERNELW = 12, + VT_KERNELH = 14, + VT_STRIDEW = 16, + VT_STRIDEH = 18, + VT_PADMODE = 20, + VT_PADUP = 22, + VT_PADDOWN = 24, + VT_PADLEFT = 26, + VT_PADRIGHT = 28, + VT_DILATEW = 30, + VT_DILATEH = 32, + VT_HASBIAS = 34, + VT_ACTIVATIONTYPE = 36 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t group() const { + return GetField(VT_GROUP, 0); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelOut() const { + return GetField(VT_CHANNELOUT, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + 
return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, VT_CHANNELOUT) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct Conv2DGradInputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Conv2DGradInput::VT_FORMAT, static_cast(format), 0); + } + void add_group(int32_t group) { + fbb_.AddElement(Conv2DGradInput::VT_GROUP, group, 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(Conv2DGradInput::VT_CHANNELIN, channelIn, 0); + } + void add_channelOut(int32_t channelOut) { + fbb_.AddElement(Conv2DGradInput::VT_CHANNELOUT, channelOut, 0); + } + void add_kernelW(int32_t kernelW) { + fbb_.AddElement(Conv2DGradInput::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + fbb_.AddElement(Conv2DGradInput::VT_KERNELH, kernelH, 
0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(Conv2DGradInput::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(Conv2DGradInput::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Conv2DGradInput::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(Conv2DGradInput::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(Conv2DGradInput::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(Conv2DGradInput::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(Conv2DGradInput::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + fbb_.AddElement(Conv2DGradInput::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(Conv2DGradInput::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(Conv2DGradInput::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(Conv2DGradInput::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit Conv2DGradInputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Conv2DGradInputBuilder &operator=(const Conv2DGradInputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DGradInput( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t group = 0, + int32_t channelIn = 0, + int32_t channelOut = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = 
false, + ActivationType activationType = ActivationType_NO_ACTIVATION) { + Conv2DGradInputBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelOut(channelOut); + builder_.add_channelIn(channelIn); + builder_.add_group(group); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct FusedBatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4, + VT_MOMENTUM = 6, + VT_SPATIAL = 8 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.00001f); + } + float momentum() const { + return GetField(VT_MOMENTUM, 0.9f); + } + int32_t spatial() const { + return GetField(VT_SPATIAL, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + VerifyField(verifier, VT_MOMENTUM) && + VerifyField(verifier, VT_SPATIAL) && + verifier.EndTable(); + } +}; + +struct FusedBatchNormBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(FusedBatchNorm::VT_EPSILON, epsilon, 0.00001f); + } + void add_momentum(float momentum) { + fbb_.AddElement(FusedBatchNorm::VT_MOMENTUM, momentum, 0.9f); + } + void add_spatial(int32_t spatial) { + fbb_.AddElement(FusedBatchNorm::VT_SPATIAL, spatial, 1); + } + explicit FusedBatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FusedBatchNormBuilder &operator=(const FusedBatchNormBuilder &); + 
flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFusedBatchNorm( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.00001f, + float momentum = 0.9f, + int32_t spatial = 1) { + FusedBatchNormBuilder builder_(_fbb); + builder_.add_spatial(spatial); + builder_.add_momentum(momentum); + builder_.add_epsilon(epsilon); + return builder_.Finish(); +} + +struct CaffeBatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPSILON = 4 + }; + float epsilon() const { + return GetField(VT_EPSILON, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPSILON) && + verifier.EndTable(); + } +}; + +struct CaffeBatchNormBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_epsilon(float epsilon) { + fbb_.AddElement(CaffeBatchNorm::VT_EPSILON, epsilon, 0.0f); + } + explicit CaffeBatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CaffeBatchNormBuilder &operator=(const CaffeBatchNormBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCaffeBatchNorm( + flatbuffers::FlatBufferBuilder &_fbb, + float epsilon = 0.0f) { + CaffeBatchNormBuilder builder_(_fbb); + builder_.add_epsilon(epsilon); + return builder_.Finish(); +} + +struct BiasGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + 
verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct BiasGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(BiasGrad::VT_AXIS, axis); + } + explicit BiasGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BiasGradBuilder &operator=(const BiasGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBiasGrad( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + BiasGradBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBiasGradDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateBiasGrad( + _fbb, + axis__); +} + +struct SoftmaxCrossEntropy FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct SoftmaxCrossEntropyBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(SoftmaxCrossEntropy::VT_AXIS, axis); + } + explicit SoftmaxCrossEntropyBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SoftmaxCrossEntropyBuilder &operator=(const SoftmaxCrossEntropyBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSoftmaxCrossEntropy( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + SoftmaxCrossEntropyBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSoftmaxCrossEntropyDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateSoftmaxCrossEntropy( + _fbb, + axis__); +} + +struct PoolingGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_POOLINGMODE = 6, + VT_GLOBAL = 8, + VT_WINDOWW = 10, + VT_WINDOWH = 12, + VT_STRIDEW = 14, + VT_STRIDEH = 16, + VT_PADMODE = 18, + VT_PADUP = 20, + VT_PADDOWN = 22, + VT_PADLEFT = 24, + VT_PADRIGHT = 26, + VT_ROUNDMODE = 28 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + PoolMode poolingMode() const { + return static_cast(GetField(VT_POOLINGMODE, 0)); + } + bool global() const { + return GetField(VT_GLOBAL, 0) != 0; + } + int32_t windowW() const { + return GetField(VT_WINDOWW, 0); + } + int32_t windowH() const { + return GetField(VT_WINDOWH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + RoundMode roundMode() const { + return static_cast(GetField(VT_ROUNDMODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) 
&& + VerifyField(verifier, VT_POOLINGMODE) && + VerifyField(verifier, VT_GLOBAL) && + VerifyField(verifier, VT_WINDOWW) && + VerifyField(verifier, VT_WINDOWH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_ROUNDMODE) && + verifier.EndTable(); + } +}; + +struct PoolingGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(PoolingGrad::VT_FORMAT, static_cast(format), 0); + } + void add_poolingMode(PoolMode poolingMode) { + fbb_.AddElement(PoolingGrad::VT_POOLINGMODE, static_cast(poolingMode), 0); + } + void add_global(bool global) { + fbb_.AddElement(PoolingGrad::VT_GLOBAL, static_cast(global), 0); + } + void add_windowW(int32_t windowW) { + fbb_.AddElement(PoolingGrad::VT_WINDOWW, windowW, 0); + } + void add_windowH(int32_t windowH) { + fbb_.AddElement(PoolingGrad::VT_WINDOWH, windowH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(PoolingGrad::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(PoolingGrad::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(PoolingGrad::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(PoolingGrad::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(PoolingGrad::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(PoolingGrad::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(PoolingGrad::VT_PADRIGHT, padRight, 0); + } + void add_roundMode(RoundMode roundMode) { + fbb_.AddElement(PoolingGrad::VT_ROUNDMODE, static_cast(roundMode), 0); + } + explicit 
PoolingGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PoolingGradBuilder &operator=(const PoolingGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePoolingGrad( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + PoolMode poolingMode = PoolMode_MAX_POOLING, + bool global = false, + int32_t windowW = 0, + int32_t windowH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + RoundMode roundMode = RoundMode_FLOOR) { + PoolingGradBuilder builder_(_fbb); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_windowH(windowH); + builder_.add_windowW(windowW); + builder_.add_format(format); + builder_.add_roundMode(roundMode); + builder_.add_padMode(padMode); + builder_.add_global(global); + builder_.add_poolingMode(poolingMode); + return builder_.Finish(); +} + +struct Shape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ShapeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ShapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ShapeBuilder &operator=(const ShapeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateShape( + flatbuffers::FlatBufferBuilder &_fbb) { + ShapeBuilder builder_(_fbb); + return builder_.Finish(); +} + 
+struct Nchw2Nhwc FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct Nchw2NhwcBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit Nchw2NhwcBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Nchw2NhwcBuilder &operator=(const Nchw2NhwcBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNchw2Nhwc( + flatbuffers::FlatBufferBuilder &_fbb) { + Nchw2NhwcBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Nhwc2Nchw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct Nhwc2NchwBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit Nhwc2NchwBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Nhwc2NchwBuilder &operator=(const Nhwc2NchwBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNhwc2Nchw( + flatbuffers::FlatBufferBuilder &_fbb) { + Nhwc2NchwBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FakeQuantWithMinMaxVars FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NARROWRANGE = 4, + VT_NUMBITS = 6 + }; + bool narrowRange() const { + return GetField(VT_NARROWRANGE, 0) != 0; + } + int32_t numBits() const { + return GetField(VT_NUMBITS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, 
VT_NARROWRANGE) && + VerifyField(verifier, VT_NUMBITS) && + verifier.EndTable(); + } +}; + +struct FakeQuantWithMinMaxVarsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_narrowRange(bool narrowRange) { + fbb_.AddElement(FakeQuantWithMinMaxVars::VT_NARROWRANGE, static_cast(narrowRange), 0); + } + void add_numBits(int32_t numBits) { + fbb_.AddElement(FakeQuantWithMinMaxVars::VT_NUMBITS, numBits, 0); + } + explicit FakeQuantWithMinMaxVarsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FakeQuantWithMinMaxVarsBuilder &operator=(const FakeQuantWithMinMaxVarsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFakeQuantWithMinMaxVars( + flatbuffers::FlatBufferBuilder &_fbb, + bool narrowRange = false, + int32_t numBits = 0) { + FakeQuantWithMinMaxVarsBuilder builder_(_fbb); + builder_.add_numBits(numBits); + builder_.add_narrowRange(narrowRange); + return builder_.Finish(); +} + +struct BiasAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct BiasAddBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(BiasAdd::VT_AXIS, axis); + } + explicit BiasAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BiasAddBuilder &operator=(const BiasAddBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBiasAdd( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + BiasAddBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBiasAddDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? _fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateBiasAdd( + _fbb, + axis__); +} + +struct Pooling FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_POOLINGMODE = 6, + VT_GLOBAL = 8, + VT_WINDOWW = 10, + VT_WINDOWH = 12, + VT_STRIDEW = 14, + VT_STRIDEH = 16, + VT_PADMODE = 18, + VT_PADUP = 20, + VT_PADDOWN = 22, + VT_PADLEFT = 24, + VT_PADRIGHT = 26, + VT_ROUNDMODE = 28 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + PoolMode poolingMode() const { + return static_cast(GetField(VT_POOLINGMODE, 0)); + } + bool global() const { + return GetField(VT_GLOBAL, 0) != 0; + } + int32_t windowW() const { + return GetField(VT_WINDOWW, 0); + } + int32_t windowH() const { + return GetField(VT_WINDOWH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + RoundMode roundMode() const { + return static_cast(GetField(VT_ROUNDMODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_POOLINGMODE) && + 
VerifyField(verifier, VT_GLOBAL) && + VerifyField(verifier, VT_WINDOWW) && + VerifyField(verifier, VT_WINDOWH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_ROUNDMODE) && + verifier.EndTable(); + } +}; + +struct PoolingBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Pooling::VT_FORMAT, static_cast(format), 0); + } + void add_poolingMode(PoolMode poolingMode) { + fbb_.AddElement(Pooling::VT_POOLINGMODE, static_cast(poolingMode), 0); + } + void add_global(bool global) { + fbb_.AddElement(Pooling::VT_GLOBAL, static_cast(global), 0); + } + void add_windowW(int32_t windowW) { + fbb_.AddElement(Pooling::VT_WINDOWW, windowW, 0); + } + void add_windowH(int32_t windowH) { + fbb_.AddElement(Pooling::VT_WINDOWH, windowH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(Pooling::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(Pooling::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Pooling::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(Pooling::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(Pooling::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(Pooling::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(Pooling::VT_PADRIGHT, padRight, 0); + } + void add_roundMode(RoundMode roundMode) { + fbb_.AddElement(Pooling::VT_ROUNDMODE, static_cast(roundMode), 0); + } + explicit PoolingBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PoolingBuilder 
&operator=(const PoolingBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePooling( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + PoolMode poolingMode = PoolMode_MAX_POOLING, + bool global = false, + int32_t windowW = 0, + int32_t windowH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + RoundMode roundMode = RoundMode_FLOOR) { + PoolingBuilder builder_(_fbb); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_windowH(windowH); + builder_.add_windowW(windowW); + builder_.add_format(format); + builder_.add_roundMode(roundMode); + builder_.add_padMode(padMode); + builder_.add_global(global); + builder_.add_poolingMode(poolingMode); + return builder_.Finish(); +} + +struct DepthwiseConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_CHANNELIN = 6, + VT_CHANNELMULTIPLIER = 8, + VT_KERNELW = 10, + VT_KERNELH = 12, + VT_STRIDEW = 14, + VT_STRIDEH = 16, + VT_PADMODE = 18, + VT_PADUP = 20, + VT_PADDOWN = 22, + VT_PADLEFT = 24, + VT_PADRIGHT = 26, + VT_DILATEW = 28, + VT_DILATEH = 30, + VT_HASBIAS = 32, + VT_ACTIVATIONTYPE = 34 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelMultiplier() const { + return GetField(VT_CHANNELMULTIPLIER, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() const { + return 
GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, VT_CHANNELMULTIPLIER) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct DepthwiseConv2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(DepthwiseConv2D::VT_FORMAT, static_cast(format), 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(DepthwiseConv2D::VT_CHANNELIN, channelIn, 0); + } + void add_channelMultiplier(int32_t channelMultiplier) { + fbb_.AddElement(DepthwiseConv2D::VT_CHANNELMULTIPLIER, channelMultiplier, 0); + } + void add_kernelW(int32_t 
kernelW) { + fbb_.AddElement(DepthwiseConv2D::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + fbb_.AddElement(DepthwiseConv2D::VT_KERNELH, kernelH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(DepthwiseConv2D::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(DepthwiseConv2D::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(DepthwiseConv2D::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(DepthwiseConv2D::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(DepthwiseConv2D::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(DepthwiseConv2D::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(DepthwiseConv2D::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + fbb_.AddElement(DepthwiseConv2D::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(DepthwiseConv2D::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(DepthwiseConv2D::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(DepthwiseConv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit DepthwiseConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DepthwiseConv2DBuilder &operator=(const DepthwiseConv2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthwiseConv2D( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t channelIn = 0, + int32_t channelMultiplier = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, 
+ int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = false, + ActivationType activationType = ActivationType_NO_ACTIVATION) { + DepthwiseConv2DBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelMultiplier(channelMultiplier); + builder_.add_channelIn(channelIn); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct DeDepthwiseConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_CHANNELIN = 6, + VT_CHANNELMULTIPLIER = 8, + VT_KERNELW = 10, + VT_KERNELH = 12, + VT_STRIDEW = 14, + VT_STRIDEH = 16, + VT_PADMODE = 18, + VT_PADUP = 20, + VT_PADDOWN = 22, + VT_PADLEFT = 24, + VT_PADRIGHT = 26, + VT_DILATEW = 28, + VT_DILATEH = 30, + VT_HASBIAS = 32, + VT_ACTIVATIONTYPE = 34 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelMultiplier() const { + return GetField(VT_CHANNELMULTIPLIER, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { 
+ return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, VT_CHANNELMULTIPLIER) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct DeDepthwiseConv2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(DeDepthwiseConv2D::VT_FORMAT, static_cast(format), 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(DeDepthwiseConv2D::VT_CHANNELIN, channelIn, 0); + } + void add_channelMultiplier(int32_t channelMultiplier) { + fbb_.AddElement(DeDepthwiseConv2D::VT_CHANNELMULTIPLIER, channelMultiplier, 0); + } + void add_kernelW(int32_t kernelW) { + fbb_.AddElement(DeDepthwiseConv2D::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + fbb_.AddElement(DeDepthwiseConv2D::VT_KERNELH, kernelH, 0); + } + void add_strideW(int32_t strideW) { + 
fbb_.AddElement(DeDepthwiseConv2D::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(DeDepthwiseConv2D::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(DeDepthwiseConv2D::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(DeDepthwiseConv2D::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(DeDepthwiseConv2D::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(DeDepthwiseConv2D::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(DeDepthwiseConv2D::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + fbb_.AddElement(DeDepthwiseConv2D::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(DeDepthwiseConv2D::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(DeDepthwiseConv2D::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(DeDepthwiseConv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit DeDepthwiseConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DeDepthwiseConv2DBuilder &operator=(const DeDepthwiseConv2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDeDepthwiseConv2D( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t channelIn = 0, + int32_t channelMultiplier = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = false, + ActivationType 
activationType = ActivationType_NO_ACTIVATION) { + DeDepthwiseConv2DBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelMultiplier(channelMultiplier); + builder_.add_channelIn(channelIn); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct Resize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_METHOD = 6, + VT_NEWHEIGHT = 8, + VT_NEWWIDTH = 10, + VT_ALIGNCORNERS = 12, + VT_PRESERVEASPECTRATIO = 14 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + ResizeMethod method() const { + return static_cast(GetField(VT_METHOD, 0)); + } + int64_t newHeight() const { + return GetField(VT_NEWHEIGHT, 0); + } + int64_t newWidth() const { + return GetField(VT_NEWWIDTH, 0); + } + bool alignCorners() const { + return GetField(VT_ALIGNCORNERS, 0) != 0; + } + bool preserveAspectRatio() const { + return GetField(VT_PRESERVEASPECTRATIO, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_METHOD) && + VerifyField(verifier, VT_NEWHEIGHT) && + VerifyField(verifier, VT_NEWWIDTH) && + VerifyField(verifier, VT_ALIGNCORNERS) && + VerifyField(verifier, VT_PRESERVEASPECTRATIO) && + verifier.EndTable(); + } +}; + +struct ResizeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Resize::VT_FORMAT, static_cast(format), 0); + 
} + void add_method(ResizeMethod method) { + fbb_.AddElement(Resize::VT_METHOD, static_cast(method), 0); + } + void add_newHeight(int64_t newHeight) { + fbb_.AddElement(Resize::VT_NEWHEIGHT, newHeight, 0); + } + void add_newWidth(int64_t newWidth) { + fbb_.AddElement(Resize::VT_NEWWIDTH, newWidth, 0); + } + void add_alignCorners(bool alignCorners) { + fbb_.AddElement(Resize::VT_ALIGNCORNERS, static_cast(alignCorners), 0); + } + void add_preserveAspectRatio(bool preserveAspectRatio) { + fbb_.AddElement(Resize::VT_PRESERVEASPECTRATIO, static_cast(preserveAspectRatio), 0); + } + explicit ResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ResizeBuilder &operator=(const ResizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateResize( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + ResizeMethod method = ResizeMethod_BILINEAR, + int64_t newHeight = 0, + int64_t newWidth = 0, + bool alignCorners = false, + bool preserveAspectRatio = false) { + ResizeBuilder builder_(_fbb); + builder_.add_newWidth(newWidth); + builder_.add_newHeight(newHeight); + builder_.add_format(format); + builder_.add_preserveAspectRatio(preserveAspectRatio); + builder_.add_alignCorners(alignCorners); + builder_.add_method(method); + return builder_.Finish(); +} + +struct DetectionPostProcess FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_INPUTSIZE = 6, + VT_HSCALE = 8, + VT_WSCALE = 10, + VT_XSCALE = 12, + VT_YSCALE = 14, + VT_NMSIOUTHRESHOLD = 16, + VT_NMSSCORETHRESHOLD = 18, + VT_MAXDETECTIONS = 20, + VT_DETECTIONSPRECLASS = 22, + VT_MAXCLASSESPREDETECTION = 24, + VT_NUMCLASSES = 26, + VT_USEREGULARNMS = 28 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t 
inputSize() const { + return GetField(VT_INPUTSIZE, 0); + } + float hScale() const { + return GetField(VT_HSCALE, 0.0f); + } + float wScale() const { + return GetField(VT_WSCALE, 0.0f); + } + float xScale() const { + return GetField(VT_XSCALE, 0.0f); + } + float yScale() const { + return GetField(VT_YSCALE, 0.0f); + } + float NmsIouThreshold() const { + return GetField(VT_NMSIOUTHRESHOLD, 0.0f); + } + float NmsScoreThreshold() const { + return GetField(VT_NMSSCORETHRESHOLD, 0.0f); + } + int64_t MaxDetections() const { + return GetField(VT_MAXDETECTIONS, 0); + } + int64_t DetectionsPreClass() const { + return GetField(VT_DETECTIONSPRECLASS, 0); + } + int64_t MaxClassesPreDetection() const { + return GetField(VT_MAXCLASSESPREDETECTION, 0); + } + int64_t NumClasses() const { + return GetField(VT_NUMCLASSES, 0); + } + bool UseRegularNms() const { + return GetField(VT_USEREGULARNMS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_INPUTSIZE) && + VerifyField(verifier, VT_HSCALE) && + VerifyField(verifier, VT_WSCALE) && + VerifyField(verifier, VT_XSCALE) && + VerifyField(verifier, VT_YSCALE) && + VerifyField(verifier, VT_NMSIOUTHRESHOLD) && + VerifyField(verifier, VT_NMSSCORETHRESHOLD) && + VerifyField(verifier, VT_MAXDETECTIONS) && + VerifyField(verifier, VT_DETECTIONSPRECLASS) && + VerifyField(verifier, VT_MAXCLASSESPREDETECTION) && + VerifyField(verifier, VT_NUMCLASSES) && + VerifyField(verifier, VT_USEREGULARNMS) && + verifier.EndTable(); + } +}; + +struct DetectionPostProcessBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(DetectionPostProcess::VT_FORMAT, static_cast(format), 0); + } + void add_inputSize(int32_t inputSize) { + fbb_.AddElement(DetectionPostProcess::VT_INPUTSIZE, inputSize, 0); + } + void add_hScale(float hScale) { + 
fbb_.AddElement(DetectionPostProcess::VT_HSCALE, hScale, 0.0f); + } + void add_wScale(float wScale) { + fbb_.AddElement(DetectionPostProcess::VT_WSCALE, wScale, 0.0f); + } + void add_xScale(float xScale) { + fbb_.AddElement(DetectionPostProcess::VT_XSCALE, xScale, 0.0f); + } + void add_yScale(float yScale) { + fbb_.AddElement(DetectionPostProcess::VT_YSCALE, yScale, 0.0f); + } + void add_NmsIouThreshold(float NmsIouThreshold) { + fbb_.AddElement(DetectionPostProcess::VT_NMSIOUTHRESHOLD, NmsIouThreshold, 0.0f); + } + void add_NmsScoreThreshold(float NmsScoreThreshold) { + fbb_.AddElement(DetectionPostProcess::VT_NMSSCORETHRESHOLD, NmsScoreThreshold, 0.0f); + } + void add_MaxDetections(int64_t MaxDetections) { + fbb_.AddElement(DetectionPostProcess::VT_MAXDETECTIONS, MaxDetections, 0); + } + void add_DetectionsPreClass(int64_t DetectionsPreClass) { + fbb_.AddElement(DetectionPostProcess::VT_DETECTIONSPRECLASS, DetectionsPreClass, 0); + } + void add_MaxClassesPreDetection(int64_t MaxClassesPreDetection) { + fbb_.AddElement(DetectionPostProcess::VT_MAXCLASSESPREDETECTION, MaxClassesPreDetection, 0); + } + void add_NumClasses(int64_t NumClasses) { + fbb_.AddElement(DetectionPostProcess::VT_NUMCLASSES, NumClasses, 0); + } + void add_UseRegularNms(bool UseRegularNms) { + fbb_.AddElement(DetectionPostProcess::VT_USEREGULARNMS, static_cast(UseRegularNms), 0); + } + explicit DetectionPostProcessBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DetectionPostProcessBuilder &operator=(const DetectionPostProcessBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDetectionPostProcess( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t inputSize = 0, + float hScale = 0.0f, + float wScale = 0.0f, + float xScale = 0.0f, + float yScale = 0.0f, + float NmsIouThreshold = 0.0f, + 
float NmsScoreThreshold = 0.0f, + int64_t MaxDetections = 0, + int64_t DetectionsPreClass = 0, + int64_t MaxClassesPreDetection = 0, + int64_t NumClasses = 0, + bool UseRegularNms = false) { + DetectionPostProcessBuilder builder_(_fbb); + builder_.add_NumClasses(NumClasses); + builder_.add_MaxClassesPreDetection(MaxClassesPreDetection); + builder_.add_DetectionsPreClass(DetectionsPreClass); + builder_.add_MaxDetections(MaxDetections); + builder_.add_NmsScoreThreshold(NmsScoreThreshold); + builder_.add_NmsIouThreshold(NmsIouThreshold); + builder_.add_yScale(yScale); + builder_.add_xScale(xScale); + builder_.add_wScale(wScale); + builder_.add_hScale(hScale); + builder_.add_inputSize(inputSize); + builder_.add_format(format); + builder_.add_UseRegularNms(UseRegularNms); + return builder_.Finish(); +} + +struct FullConnection FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_HASBIAS = 4, + VT_AXIS = 6 + }; + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct FullConnectionBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_hasBias(bool hasBias) { + fbb_.AddElement(FullConnection::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(FullConnection::VT_AXIS, axis, 0); + } + explicit FullConnectionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FullConnectionBuilder &operator=(const FullConnectionBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset 
CreateFullConnection( + flatbuffers::FlatBufferBuilder &_fbb, + bool hasBias = false, + int32_t axis = 0) { + FullConnectionBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_hasBias(hasBias); + return builder_.Finish(); +} + +struct Mean FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_KEEPDIMS = 6 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool keepDims() const { + return GetField(VT_KEEPDIMS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + VerifyField(verifier, VT_KEEPDIMS) && + verifier.EndTable(); + } +}; + +struct MeanBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Mean::VT_AXIS, axis); + } + void add_keepDims(bool keepDims) { + fbb_.AddElement(Mean::VT_KEEPDIMS, static_cast(keepDims), 0); + } + explicit MeanBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MeanBuilder &operator=(const MeanBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMean( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0, + bool keepDims = false) { + MeanBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_keepDims(keepDims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMeanDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr, + bool keepDims = false) { + auto axis__ = axis ? 
_fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateMean( + _fbb, + axis__, + keepDims); +} + +struct DeConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_GROUP = 6, + VT_CHANNELIN = 8, + VT_CHANNELOUT = 10, + VT_KERNELW = 12, + VT_KERNELH = 14, + VT_STRIDEW = 16, + VT_STRIDEH = 18, + VT_PADMODE = 20, + VT_PADUP = 22, + VT_PADDOWN = 24, + VT_PADLEFT = 26, + VT_PADRIGHT = 28, + VT_DILATEW = 30, + VT_DILATEH = 32, + VT_HASBIAS = 34, + VT_ACTIVATIONTYPE = 36 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t group() const { + return GetField(VT_GROUP, 0); + } + int32_t channelIn() const { + return GetField(VT_CHANNELIN, 0); + } + int32_t channelOut() const { + return GetField(VT_CHANNELOUT, 0); + } + int32_t kernelW() const { + return GetField(VT_KERNELW, 0); + } + int32_t kernelH() const { + return GetField(VT_KERNELH, 0); + } + int32_t strideW() const { + return GetField(VT_STRIDEW, 0); + } + int32_t strideH() const { + return GetField(VT_STRIDEH, 0); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t padUp() const { + return GetField(VT_PADUP, 0); + } + int32_t padDown() const { + return GetField(VT_PADDOWN, 0); + } + int32_t padLeft() const { + return GetField(VT_PADLEFT, 0); + } + int32_t padRight() const { + return GetField(VT_PADRIGHT, 0); + } + int32_t dilateW() const { + return GetField(VT_DILATEW, 0); + } + int32_t dilateH() const { + return GetField(VT_DILATEH, 0); + } + bool hasBias() const { + return GetField(VT_HASBIAS, 0) != 0; + } + ActivationType activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_CHANNELIN) && + VerifyField(verifier, 
VT_CHANNELOUT) && + VerifyField(verifier, VT_KERNELW) && + VerifyField(verifier, VT_KERNELH) && + VerifyField(verifier, VT_STRIDEW) && + VerifyField(verifier, VT_STRIDEH) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_PADUP) && + VerifyField(verifier, VT_PADDOWN) && + VerifyField(verifier, VT_PADLEFT) && + VerifyField(verifier, VT_PADRIGHT) && + VerifyField(verifier, VT_DILATEW) && + VerifyField(verifier, VT_DILATEH) && + VerifyField(verifier, VT_HASBIAS) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + verifier.EndTable(); + } +}; + +struct DeConv2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(DeConv2D::VT_FORMAT, static_cast(format), 0); + } + void add_group(int32_t group) { + fbb_.AddElement(DeConv2D::VT_GROUP, group, 0); + } + void add_channelIn(int32_t channelIn) { + fbb_.AddElement(DeConv2D::VT_CHANNELIN, channelIn, 0); + } + void add_channelOut(int32_t channelOut) { + fbb_.AddElement(DeConv2D::VT_CHANNELOUT, channelOut, 0); + } + void add_kernelW(int32_t kernelW) { + fbb_.AddElement(DeConv2D::VT_KERNELW, kernelW, 0); + } + void add_kernelH(int32_t kernelH) { + fbb_.AddElement(DeConv2D::VT_KERNELH, kernelH, 0); + } + void add_strideW(int32_t strideW) { + fbb_.AddElement(DeConv2D::VT_STRIDEW, strideW, 0); + } + void add_strideH(int32_t strideH) { + fbb_.AddElement(DeConv2D::VT_STRIDEH, strideH, 0); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(DeConv2D::VT_PADMODE, static_cast(padMode), 0); + } + void add_padUp(int32_t padUp) { + fbb_.AddElement(DeConv2D::VT_PADUP, padUp, 0); + } + void add_padDown(int32_t padDown) { + fbb_.AddElement(DeConv2D::VT_PADDOWN, padDown, 0); + } + void add_padLeft(int32_t padLeft) { + fbb_.AddElement(DeConv2D::VT_PADLEFT, padLeft, 0); + } + void add_padRight(int32_t padRight) { + fbb_.AddElement(DeConv2D::VT_PADRIGHT, padRight, 0); + } + void add_dilateW(int32_t dilateW) { + 
fbb_.AddElement(DeConv2D::VT_DILATEW, dilateW, 0); + } + void add_dilateH(int32_t dilateH) { + fbb_.AddElement(DeConv2D::VT_DILATEH, dilateH, 0); + } + void add_hasBias(bool hasBias) { + fbb_.AddElement(DeConv2D::VT_HASBIAS, static_cast(hasBias), 0); + } + void add_activationType(ActivationType activationType) { + fbb_.AddElement(DeConv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + explicit DeConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DeConv2DBuilder &operator=(const DeConv2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDeConv2D( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + int32_t group = 0, + int32_t channelIn = 0, + int32_t channelOut = 0, + int32_t kernelW = 0, + int32_t kernelH = 0, + int32_t strideW = 0, + int32_t strideH = 0, + PadMode padMode = PadMode_NOTSET, + int32_t padUp = 0, + int32_t padDown = 0, + int32_t padLeft = 0, + int32_t padRight = 0, + int32_t dilateW = 0, + int32_t dilateH = 0, + bool hasBias = false, + ActivationType activationType = ActivationType_NO_ACTIVATION) { + DeConv2DBuilder builder_(_fbb); + builder_.add_dilateH(dilateH); + builder_.add_dilateW(dilateW); + builder_.add_padRight(padRight); + builder_.add_padLeft(padLeft); + builder_.add_padDown(padDown); + builder_.add_padUp(padUp); + builder_.add_strideH(strideH); + builder_.add_strideW(strideW); + builder_.add_kernelH(kernelH); + builder_.add_kernelW(kernelW); + builder_.add_channelOut(channelOut); + builder_.add_channelIn(channelIn); + builder_.add_group(group); + builder_.add_format(format); + builder_.add_activationType(activationType); + builder_.add_hasBias(hasBias); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +struct BNGradInput FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EPS = 4, + VT_CHANNELS = 6 + }; + float eps() const { + return GetField(VT_EPS, 0.0f); + } + int32_t channels() const { + return GetField(VT_CHANNELS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EPS) && + VerifyField(verifier, VT_CHANNELS) && + verifier.EndTable(); + } +}; + +struct BNGradInputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_eps(float eps) { + fbb_.AddElement(BNGradInput::VT_EPS, eps, 0.0f); + } + void add_channels(int32_t channels) { + fbb_.AddElement(BNGradInput::VT_CHANNELS, channels, 0); + } + explicit BNGradInputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BNGradInputBuilder &operator=(const BNGradInputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBNGradInput( + flatbuffers::FlatBufferBuilder &_fbb, + float eps = 0.0f, + int32_t channels = 0) { + BNGradInputBuilder builder_(_fbb); + builder_.add_channels(channels); + builder_.add_eps(eps); + return builder_.Finish(); +} + +struct Scale FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct ScaleBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Scale::VT_FORMAT, static_cast(format), 0); + } + explicit ScaleBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ScaleBuilder 
&operator=(const ScaleBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateScale( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW) { + ScaleBuilder builder_(_fbb); + builder_.add_format(format); + return builder_.Finish(); +} + +struct Eltwise FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4 + }; + EltwiseMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); + } +}; + +struct EltwiseBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(EltwiseMode mode) { + fbb_.AddElement(Eltwise::VT_MODE, static_cast(mode), 0); + } + explicit EltwiseBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EltwiseBuilder &operator=(const EltwiseBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEltwise( + flatbuffers::FlatBufferBuilder &_fbb, + EltwiseMode mode = EltwiseMode_PROD) { + EltwiseBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +struct Add FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AddBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AddBuilder &operator=(const AddBuilder &); + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAdd( + flatbuffers::FlatBufferBuilder &_fbb) { + AddBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Sub FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SubBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SubBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SubBuilder &operator=(const SubBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSub( + flatbuffers::FlatBufferBuilder &_fbb) { + SubBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Mul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MulBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MulBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MulBuilder &operator=(const MulBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMul( + flatbuffers::FlatBufferBuilder &_fbb) { + MulBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Div FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct DivBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit 
DivBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DivBuilder &operator=(const DivBuilder &); + flatbuffers::Offset
Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset
(end); + return o; + } +}; + +inline flatbuffers::Offset
CreateDiv( + flatbuffers::FlatBufferBuilder &_fbb) { + DivBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AddGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AddGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AddGradBuilder &operator=(const AddGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + AddGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SubGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SubGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SubGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SubGradBuilder &operator=(const SubGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + SubGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MulGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MulGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MulGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ 
= fbb_.StartTable(); + } + MulGradBuilder &operator=(const MulGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMulGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + MulGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct DivGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct DivGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DivGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DivGradBuilder &operator=(const DivGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDivGrad( + flatbuffers::FlatBufferBuilder &_fbb) { + DivGradBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct RealDiv FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RealDivBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RealDivBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RealDivBuilder &operator=(const RealDivBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRealDiv( + flatbuffers::FlatBufferBuilder &_fbb) { + RealDivBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Rsqrt FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RsqrtBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RsqrtBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RsqrtBuilder &operator=(const RsqrtBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRsqrt( + flatbuffers::FlatBufferBuilder &_fbb) { + RsqrtBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Equal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct EqualBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit EqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EqualBuilder &operator=(const EqualBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + EqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Less FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LessBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LessBuilder &operator=(const LessBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLess( + flatbuffers::FlatBufferBuilder &_fbb) 
{ + LessBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Greater FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GreaterBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GreaterBuilder &operator=(const GreaterBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreater( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NotEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NotEqualBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NotEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NotEqualBuilder &operator=(const NotEqualBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNotEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + NotEqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LessEqualBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + 
LessEqualBuilder &operator=(const LessEqualBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLessEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + LessEqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GreaterEqual FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct GreaterEqualBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterEqualBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GreaterEqualBuilder &operator=(const GreaterEqualBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreaterEqual( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterEqualBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Min FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct MinBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MinBuilder &operator=(const MinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMin( + flatbuffers::FlatBufferBuilder &_fbb) { + MinBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Slice FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 
4, + VT_BEGIN = 6, + VT_SIZE = 8 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *begin() const { + return GetPointer *>(VT_BEGIN); + } + const flatbuffers::Vector *size() const { + return GetPointer *>(VT_SIZE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_BEGIN) && + verifier.VerifyVector(begin()) && + VerifyOffset(verifier, VT_SIZE) && + verifier.VerifyVector(size()) && + verifier.EndTable(); + } +}; + +struct SliceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Slice::VT_FORMAT, static_cast(format), 0); + } + void add_begin(flatbuffers::Offset> begin) { + fbb_.AddOffset(Slice::VT_BEGIN, begin); + } + void add_size(flatbuffers::Offset> size) { + fbb_.AddOffset(Slice::VT_SIZE, size); + } + explicit SliceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SliceBuilder &operator=(const SliceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSlice( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + flatbuffers::Offset> begin = 0, + flatbuffers::Offset> size = 0) { + SliceBuilder builder_(_fbb); + builder_.add_size(size); + builder_.add_begin(begin); + builder_.add_format(format); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSliceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + const std::vector *begin = nullptr, + const std::vector *size = nullptr) { + auto begin__ = begin ? _fbb.CreateVector(*begin) : 0; + auto size__ = size ? 
_fbb.CreateVector(*size) : 0; + return mindspore::schema::CreateSlice( + _fbb, + format, + begin__, + size__); +} + +struct Floor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct FloorBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FloorBuilder &operator=(const FloorBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloor( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Abs FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AbsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AbsBuilder &operator=(const AbsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAbs( + flatbuffers::FlatBufferBuilder &_fbb) { + AbsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Neg FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NegBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NegBuilder 
&operator=(const NegBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNeg( + flatbuffers::FlatBufferBuilder &_fbb) { + NegBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Exp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ExpBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ExpBuilder &operator=(const ExpBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExp( + flatbuffers::FlatBufferBuilder &_fbb) { + ExpBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Cos FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CosBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CosBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CosBuilder &operator=(const CosBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCos( + flatbuffers::FlatBufferBuilder &_fbb) { + CosBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Sin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SinBuilder { + flatbuffers::FlatBufferBuilder 
&fbb_; + flatbuffers::uoffset_t start_; + explicit SinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SinBuilder &operator=(const SinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSin( + flatbuffers::FlatBufferBuilder &_fbb) { + SinBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Sqrt FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SqrtBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SqrtBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SqrtBuilder &operator=(const SqrtBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqrt( + flatbuffers::FlatBufferBuilder &_fbb) { + SqrtBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Square FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct SquareBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquareBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SquareBuilder &operator=(const SquareBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSquare( + flatbuffers::FlatBufferBuilder &_fbb) { + SquareBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Ceil FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct CeilBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CeilBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CeilBuilder &operator=(const CeilBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCeil( + flatbuffers::FlatBufferBuilder &_fbb) { + CeilBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Log FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct LogBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LogBuilder &operator=(const LogBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLog( + flatbuffers::FlatBufferBuilder &_fbb) { + LogBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Tan FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct TanBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TanBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TanBuilder &operator=(const TanBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateTan( + flatbuffers::FlatBufferBuilder &_fbb) { + TanBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Atan FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AtanBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AtanBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AtanBuilder &operator=(const AtanBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAtan( + flatbuffers::FlatBufferBuilder &_fbb) { + AtanBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Asin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct AsinBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AsinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AsinBuilder &operator=(const AsinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAsin( + flatbuffers::FlatBufferBuilder &_fbb) { + AsinBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Reshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_SHAPE = 6 + }; + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + const flatbuffers::Vector *shape() const { + return GetPointer *>(VT_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return 
VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT) && + VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && + verifier.EndTable(); + } +}; + +struct ReshapeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(Format format) { + fbb_.AddElement(Reshape::VT_FORMAT, static_cast(format), 0); + } + void add_shape(flatbuffers::Offset> shape) { + fbb_.AddOffset(Reshape::VT_SHAPE, shape); + } + explicit ReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReshapeBuilder &operator=(const ReshapeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReshape( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + flatbuffers::Offset> shape = 0) { + ReshapeBuilder builder_(_fbb); + builder_.add_shape(shape); + builder_.add_format(format); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReshapeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + Format format = Format_NCHW, + const std::vector *shape = nullptr) { + auto shape__ = shape ? 
_fbb.CreateVector(*shape) : 0; + return mindspore::schema::CreateReshape( + _fbb, + format, + shape__); +} + +struct Power FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_POWER = 4, + VT_SCALE = 6, + VT_SHIFT = 8 + }; + float power() const { + return GetField(VT_POWER, 0.0f); + } + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + float shift() const { + return GetField(VT_SHIFT, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_POWER) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_SHIFT) && + verifier.EndTable(); + } +}; + +struct PowerBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_power(float power) { + fbb_.AddElement(Power::VT_POWER, power, 0.0f); + } + void add_scale(float scale) { + fbb_.AddElement(Power::VT_SCALE, scale, 0.0f); + } + void add_shift(float shift) { + fbb_.AddElement(Power::VT_SHIFT, shift, 0.0f); + } + explicit PowerBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PowerBuilder &operator=(const PowerBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePower( + flatbuffers::FlatBufferBuilder &_fbb, + float power = 0.0f, + float scale = 0.0f, + float shift = 0.0f) { + PowerBuilder builder_(_fbb); + builder_.add_shift(shift); + builder_.add_scale(scale); + builder_.add_power(power); + return builder_.Finish(); +} + +struct PowerGrad FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_POWER = 4, + VT_SCALE = 6, + VT_SHIFT = 8 + }; + float power() const { + return GetField(VT_POWER, 0.0f); + } + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + float 
shift() const { + return GetField(VT_SHIFT, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_POWER) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_SHIFT) && + verifier.EndTable(); + } +}; + +struct PowerGradBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_power(float power) { + fbb_.AddElement(PowerGrad::VT_POWER, power, 0.0f); + } + void add_scale(float scale) { + fbb_.AddElement(PowerGrad::VT_SCALE, scale, 0.0f); + } + void add_shift(float shift) { + fbb_.AddElement(PowerGrad::VT_SHIFT, shift, 0.0f); + } + explicit PowerGradBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PowerGradBuilder &operator=(const PowerGradBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePowerGrad( + flatbuffers::FlatBufferBuilder &_fbb, + float power = 0.0f, + float scale = 0.0f, + float shift = 0.0f) { + PowerGradBuilder builder_(_fbb); + builder_.add_shift(shift); + builder_.add_scale(scale); + builder_.add_power(power); + return builder_.Finish(); +} + +struct ArgMax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_OUTMAXVALUE = 6, + VT_TOPK = 8, + VT_KEEPDIMS = 10, + VT_AXISTYPE = 12 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool outMaxValue() const { + return GetField(VT_OUTMAXVALUE, 0) != 0; + } + int32_t topK() const { + return GetField(VT_TOPK, 1); + } + bool keepDims() const { + return GetField(VT_KEEPDIMS, 0) != 0; + } + int32_t axisType() const { + return GetField(VT_AXISTYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, 
VT_OUTMAXVALUE) && + VerifyField(verifier, VT_TOPK) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_AXISTYPE) && + verifier.EndTable(); + } +}; + +struct ArgMaxBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(ArgMax::VT_AXIS, axis, 0); + } + void add_outMaxValue(bool outMaxValue) { + fbb_.AddElement(ArgMax::VT_OUTMAXVALUE, static_cast(outMaxValue), 0); + } + void add_topK(int32_t topK) { + fbb_.AddElement(ArgMax::VT_TOPK, topK, 1); + } + void add_keepDims(bool keepDims) { + fbb_.AddElement(ArgMax::VT_KEEPDIMS, static_cast(keepDims), 0); + } + void add_axisType(int32_t axisType) { + fbb_.AddElement(ArgMax::VT_AXISTYPE, axisType, 0); + } + explicit ArgMaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ArgMaxBuilder &operator=(const ArgMaxBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMax( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + bool outMaxValue = false, + int32_t topK = 1, + bool keepDims = false, + int32_t axisType = 0) { + ArgMaxBuilder builder_(_fbb); + builder_.add_axisType(axisType); + builder_.add_topK(topK); + builder_.add_axis(axis); + builder_.add_keepDims(keepDims); + builder_.add_outMaxValue(outMaxValue); + return builder_.Finish(); +} + +struct ArgMin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_OUTMAXVALUE = 6, + VT_TOPK = 8, + VT_KEEPDIMS = 10, + VT_AXISTYPE = 12 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool outMaxValue() const { + return GetField(VT_OUTMAXVALUE, 0) != 0; + } + int32_t topK() const { + return GetField(VT_TOPK, 1); + } + bool keepDims() const { + return GetField(VT_KEEPDIMS, 0) != 0; + } + int32_t 
axisType() const { + return GetField(VT_AXISTYPE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_OUTMAXVALUE) && + VerifyField(verifier, VT_TOPK) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_AXISTYPE) && + verifier.EndTable(); + } +}; + +struct ArgMinBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(ArgMin::VT_AXIS, axis, 0); + } + void add_outMaxValue(bool outMaxValue) { + fbb_.AddElement(ArgMin::VT_OUTMAXVALUE, static_cast(outMaxValue), 0); + } + void add_topK(int32_t topK) { + fbb_.AddElement(ArgMin::VT_TOPK, topK, 1); + } + void add_keepDims(bool keepDims) { + fbb_.AddElement(ArgMin::VT_KEEPDIMS, static_cast(keepDims), 0); + } + void add_axisType(int32_t axisType) { + fbb_.AddElement(ArgMin::VT_AXISTYPE, axisType, 0); + } + explicit ArgMinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ArgMinBuilder &operator=(const ArgMinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMin( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + bool outMaxValue = false, + int32_t topK = 1, + bool keepDims = false, + int32_t axisType = 0) { + ArgMinBuilder builder_(_fbb); + builder_.add_axisType(axisType); + builder_.add_topK(topK); + builder_.add_axis(axis); + builder_.add_keepDims(keepDims); + builder_.add_outMaxValue(outMaxValue); + return builder_.Finish(); +} + +struct NetOutput FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct NetOutputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + 
explicit NetOutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NetOutputBuilder &operator=(const NetOutputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNetOutput( + flatbuffers::FlatBufferBuilder &_fbb) { + NetOutputBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TRANSPOSEA = 4, + VT_TRANSPOSEB = 6 + }; + bool transposeA() const { + return GetField(VT_TRANSPOSEA, 0) != 0; + } + bool transposeB() const { + return GetField(VT_TRANSPOSEB, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TRANSPOSEA) && + VerifyField(verifier, VT_TRANSPOSEB) && + verifier.EndTable(); + } +}; + +struct MatMulBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_transposeA(bool transposeA) { + fbb_.AddElement(MatMul::VT_TRANSPOSEA, static_cast(transposeA), 0); + } + void add_transposeB(bool transposeB) { + fbb_.AddElement(MatMul::VT_TRANSPOSEB, static_cast(transposeB), 0); + } + explicit MatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MatMulBuilder &operator=(const MatMulBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMatMul( + flatbuffers::FlatBufferBuilder &_fbb, + bool transposeA = false, + bool transposeB = false) { + MatMulBuilder builder_(_fbb); + builder_.add_transposeB(transposeB); + builder_.add_transposeA(transposeA); + return builder_.Finish(); +} + +struct CaffePReLU FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CHANNELSHARED = 4 + }; + bool channelShared() const { + return GetField(VT_CHANNELSHARED, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CHANNELSHARED) && + verifier.EndTable(); + } +}; + +struct CaffePReLUBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_channelShared(bool channelShared) { + fbb_.AddElement(CaffePReLU::VT_CHANNELSHARED, static_cast(channelShared), 0); + } + explicit CaffePReLUBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CaffePReLUBuilder &operator=(const CaffePReLUBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCaffePReLU( + flatbuffers::FlatBufferBuilder &_fbb, + bool channelShared = false) { + CaffePReLUBuilder builder_(_fbb); + builder_.add_channelShared(channelShared); + return builder_.Finish(); +} + +struct LeakyReLU FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NEGATIVESLOPE = 4 + }; + float negativeSlope() const { + return GetField(VT_NEGATIVESLOPE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NEGATIVESLOPE) && + verifier.EndTable(); + } +}; + +struct LeakyReLUBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_negativeSlope(float negativeSlope) { + fbb_.AddElement(LeakyReLU::VT_NEGATIVESLOPE, negativeSlope, 0.0f); + } + explicit LeakyReLUBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LeakyReLUBuilder &operator=(const LeakyReLUBuilder &); + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLeakyReLU( + flatbuffers::FlatBufferBuilder &_fbb, + float negativeSlope = 0.0f) { + LeakyReLUBuilder builder_(_fbb); + builder_.add_negativeSlope(negativeSlope); + return builder_.Finish(); +} + +struct StridedSlice FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGINMASK = 4, + VT_ENDMASK = 6, + VT_ELLIPSISMASK = 8, + VT_NEWAXISMASK = 10, + VT_SHRINKAXISMASK = 12, + VT_BEGIN = 14, + VT_END = 16, + VT_STRIDE = 18, + VT_ISSCALE = 20 + }; + int32_t beginMask() const { + return GetField(VT_BEGINMASK, 0); + } + int32_t endMask() const { + return GetField(VT_ENDMASK, 0); + } + int32_t ellipsisMask() const { + return GetField(VT_ELLIPSISMASK, 0); + } + int32_t newAxisMask() const { + return GetField(VT_NEWAXISMASK, 0); + } + int32_t shrinkAxisMask() const { + return GetField(VT_SHRINKAXISMASK, 0); + } + const flatbuffers::Vector *begin() const { + return GetPointer *>(VT_BEGIN); + } + const flatbuffers::Vector *end() const { + return GetPointer *>(VT_END); + } + const flatbuffers::Vector *stride() const { + return GetPointer *>(VT_STRIDE); + } + const flatbuffers::Vector *isScale() const { + return GetPointer *>(VT_ISSCALE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGINMASK) && + VerifyField(verifier, VT_ENDMASK) && + VerifyField(verifier, VT_ELLIPSISMASK) && + VerifyField(verifier, VT_NEWAXISMASK) && + VerifyField(verifier, VT_SHRINKAXISMASK) && + VerifyOffset(verifier, VT_BEGIN) && + verifier.VerifyVector(begin()) && + VerifyOffset(verifier, VT_END) && + verifier.VerifyVector(end()) && + VerifyOffset(verifier, VT_STRIDE) && + verifier.VerifyVector(stride()) && + VerifyOffset(verifier, VT_ISSCALE) && + verifier.VerifyVector(isScale()) && + verifier.EndTable(); + } +}; + +struct 
StridedSliceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beginMask(int32_t beginMask) { + fbb_.AddElement(StridedSlice::VT_BEGINMASK, beginMask, 0); + } + void add_endMask(int32_t endMask) { + fbb_.AddElement(StridedSlice::VT_ENDMASK, endMask, 0); + } + void add_ellipsisMask(int32_t ellipsisMask) { + fbb_.AddElement(StridedSlice::VT_ELLIPSISMASK, ellipsisMask, 0); + } + void add_newAxisMask(int32_t newAxisMask) { + fbb_.AddElement(StridedSlice::VT_NEWAXISMASK, newAxisMask, 0); + } + void add_shrinkAxisMask(int32_t shrinkAxisMask) { + fbb_.AddElement(StridedSlice::VT_SHRINKAXISMASK, shrinkAxisMask, 0); + } + void add_begin(flatbuffers::Offset> begin) { + fbb_.AddOffset(StridedSlice::VT_BEGIN, begin); + } + void add_end(flatbuffers::Offset> end) { + fbb_.AddOffset(StridedSlice::VT_END, end); + } + void add_stride(flatbuffers::Offset> stride) { + fbb_.AddOffset(StridedSlice::VT_STRIDE, stride); + } + void add_isScale(flatbuffers::Offset> isScale) { + fbb_.AddOffset(StridedSlice::VT_ISSCALE, isScale); + } + explicit StridedSliceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + StridedSliceBuilder &operator=(const StridedSliceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStridedSlice( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t beginMask = 0, + int32_t endMask = 0, + int32_t ellipsisMask = 0, + int32_t newAxisMask = 0, + int32_t shrinkAxisMask = 0, + flatbuffers::Offset> begin = 0, + flatbuffers::Offset> end = 0, + flatbuffers::Offset> stride = 0, + flatbuffers::Offset> isScale = 0) { + StridedSliceBuilder builder_(_fbb); + builder_.add_isScale(isScale); + builder_.add_stride(stride); + builder_.add_end(end); + builder_.add_begin(begin); + builder_.add_shrinkAxisMask(shrinkAxisMask); + builder_.add_newAxisMask(newAxisMask); + 
builder_.add_ellipsisMask(ellipsisMask); + builder_.add_endMask(endMask); + builder_.add_beginMask(beginMask); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateStridedSliceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t beginMask = 0, + int32_t endMask = 0, + int32_t ellipsisMask = 0, + int32_t newAxisMask = 0, + int32_t shrinkAxisMask = 0, + const std::vector *begin = nullptr, + const std::vector *end = nullptr, + const std::vector *stride = nullptr, + const std::vector *isScale = nullptr) { + auto begin__ = begin ? _fbb.CreateVector(*begin) : 0; + auto end__ = end ? _fbb.CreateVector(*end) : 0; + auto stride__ = stride ? _fbb.CreateVector(*stride) : 0; + auto isScale__ = isScale ? _fbb.CreateVector(*isScale) : 0; + return mindspore::schema::CreateStridedSlice( + _fbb, + beginMask, + endMask, + ellipsisMask, + newAxisMask, + shrinkAxisMask, + begin__, + end__, + stride__, + isScale__); +} + +struct Stack FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_N = 6, + VT_ISSCALE = 8 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t n() const { + return GetField(VT_N, 0); + } + const flatbuffers::Vector *isScale() const { + return GetPointer *>(VT_ISSCALE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_N) && + VerifyOffset(verifier, VT_ISSCALE) && + verifier.VerifyVector(isScale()) && + verifier.EndTable(); + } +}; + +struct StackBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Stack::VT_AXIS, axis, 0); + } + void add_n(int32_t n) { + fbb_.AddElement(Stack::VT_N, n, 0); + } + void add_isScale(flatbuffers::Offset> isScale) { + fbb_.AddOffset(Stack::VT_ISSCALE, isScale); + } + explicit StackBuilder(flatbuffers::FlatBufferBuilder 
&_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + StackBuilder &operator=(const StackBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStack( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t n = 0, + flatbuffers::Offset> isScale = 0) { + StackBuilder builder_(_fbb); + builder_.add_isScale(isScale); + builder_.add_n(n); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateStackDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t n = 0, + const std::vector *isScale = nullptr) { + auto isScale__ = isScale ? _fbb.CreateVector(*isScale) : 0; + return mindspore::schema::CreateStack( + _fbb, + axis, + n, + isScale__); +} + +struct Range FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DTYPE = 4, + VT_START = 6, + VT_LIMIT = 8, + VT_DELTA = 10 + }; + int32_t dType() const { + return GetField(VT_DTYPE, 0); + } + int32_t start() const { + return GetField(VT_START, 0); + } + int32_t limit() const { + return GetField(VT_LIMIT, 0); + } + int32_t delta() const { + return GetField(VT_DELTA, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DTYPE) && + VerifyField(verifier, VT_START) && + VerifyField(verifier, VT_LIMIT) && + VerifyField(verifier, VT_DELTA) && + verifier.EndTable(); + } +}; + +struct RangeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dType(int32_t dType) { + fbb_.AddElement(Range::VT_DTYPE, dType, 0); + } + void add_start(int32_t start) { + fbb_.AddElement(Range::VT_START, start, 0); + } + void add_limit(int32_t limit) { + fbb_.AddElement(Range::VT_LIMIT, limit, 0); + } + void add_delta(int32_t delta) { + fbb_.AddElement(Range::VT_DELTA, 
delta, 0); + } + explicit RangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RangeBuilder &operator=(const RangeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRange( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t dType = 0, + int32_t start = 0, + int32_t limit = 0, + int32_t delta = 0) { + RangeBuilder builder_(_fbb); + builder_.add_delta(delta); + builder_.add_limit(limit); + builder_.add_start(start); + builder_.add_dType(dType); + return builder_.Finish(); +} + +struct ExpandDims FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIM = 4 + }; + int32_t dim() const { + return GetField(VT_DIM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DIM) && + verifier.EndTable(); + } +}; + +struct ExpandDimsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dim(int32_t dim) { + fbb_.AddElement(ExpandDims::VT_DIM, dim, 0); + } + explicit ExpandDimsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ExpandDimsBuilder &operator=(const ExpandDimsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpandDims( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t dim = 0) { + ExpandDimsBuilder builder_(_fbb); + builder_.add_dim(dim); + return builder_.Finish(); +} + +struct Tile FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MULTIPLES = 4 + }; + const flatbuffers::Vector *multiples() const { + return GetPointer *>(VT_MULTIPLES); + } + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MULTIPLES) && + verifier.VerifyVector(multiples()) && + verifier.EndTable(); + } +}; + +struct TileBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_multiples(flatbuffers::Offset> multiples) { + fbb_.AddOffset(Tile::VT_MULTIPLES, multiples); + } + explicit TileBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TileBuilder &operator=(const TileBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTile( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> multiples = 0) { + TileBuilder builder_(_fbb); + builder_.add_multiples(multiples); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTileDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *multiples = nullptr) { + auto multiples__ = multiples ? 
_fbb.CreateVector(*multiples) : 0; + return mindspore::schema::CreateTile( + _fbb, + multiples__); +} + +struct Cast FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SRCT = 4, + VT_DSTT = 6 + }; + int32_t srcT() const { + return GetField(VT_SRCT, 0); + } + int32_t dstT() const { + return GetField(VT_DSTT, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SRCT) && + VerifyField(verifier, VT_DSTT) && + verifier.EndTable(); + } +}; + +struct CastBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_srcT(int32_t srcT) { + fbb_.AddElement(Cast::VT_SRCT, srcT, 0); + } + void add_dstT(int32_t dstT) { + fbb_.AddElement(Cast::VT_DSTT, dstT, 0); + } + explicit CastBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CastBuilder &operator=(const CastBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCast( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t srcT = 0, + int32_t dstT = 0) { + CastBuilder builder_(_fbb); + builder_.add_dstT(dstT); + builder_.add_srcT(srcT); + return builder_.Finish(); +} + +struct QuantDTypeCast FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SRCT = 4, + VT_DSTT = 6 + }; + int32_t srcT() const { + return GetField(VT_SRCT, 0); + } + int32_t dstT() const { + return GetField(VT_DSTT, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SRCT) && + VerifyField(verifier, VT_DSTT) && + verifier.EndTable(); + } +}; + +struct QuantDTypeCastBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_srcT(int32_t srcT) { + fbb_.AddElement(QuantDTypeCast::VT_SRCT, srcT, 0); + } + void add_dstT(int32_t dstT) { + fbb_.AddElement(QuantDTypeCast::VT_DSTT, dstT, 0); + } + explicit QuantDTypeCastBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantDTypeCastBuilder &operator=(const QuantDTypeCastBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantDTypeCast( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t srcT = 0, + int32_t dstT = 0) { + QuantDTypeCastBuilder builder_(_fbb); + builder_.add_dstT(dstT); + builder_.add_srcT(srcT); + return builder_.Finish(); +} + +struct Split FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUMBERSPLIT = 4, + VT_SIZESPLITS = 6, + VT_SPLITDIM = 8 + }; + int32_t numberSplit() const { + return GetField(VT_NUMBERSPLIT, 0); + } + const flatbuffers::Vector *sizeSplits() const { + return GetPointer *>(VT_SIZESPLITS); + } + int32_t splitDim() const { + return GetField(VT_SPLITDIM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUMBERSPLIT) && + VerifyOffset(verifier, VT_SIZESPLITS) && + verifier.VerifyVector(sizeSplits()) && + VerifyField(verifier, VT_SPLITDIM) && + verifier.EndTable(); + } +}; + +struct SplitBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_numberSplit(int32_t numberSplit) { + fbb_.AddElement(Split::VT_NUMBERSPLIT, numberSplit, 0); + } + void add_sizeSplits(flatbuffers::Offset> sizeSplits) { + fbb_.AddOffset(Split::VT_SIZESPLITS, sizeSplits); + } + void add_splitDim(int32_t splitDim) { + fbb_.AddElement(Split::VT_SPLITDIM, splitDim, 0); + } + explicit SplitBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = 
fbb_.StartTable(); + } + SplitBuilder &operator=(const SplitBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSplit( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t numberSplit = 0, + flatbuffers::Offset> sizeSplits = 0, + int32_t splitDim = 0) { + SplitBuilder builder_(_fbb); + builder_.add_splitDim(splitDim); + builder_.add_sizeSplits(sizeSplits); + builder_.add_numberSplit(numberSplit); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSplitDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t numberSplit = 0, + const std::vector *sizeSplits = nullptr, + int32_t splitDim = 0) { + auto sizeSplits__ = sizeSplits ? _fbb.CreateVector(*sizeSplits) : 0; + return mindspore::schema::CreateSplit( + _fbb, + numberSplit, + sizeSplits__, + splitDim); +} + +struct Crop FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_OFFSETS = 6 + }; + int64_t axis() const { + return GetField(VT_AXIS, 0); + } + const flatbuffers::Vector *offsets() const { + return GetPointer *>(VT_OFFSETS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyOffset(verifier, VT_OFFSETS) && + verifier.VerifyVector(offsets()) && + verifier.EndTable(); + } +}; + +struct CropBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int64_t axis) { + fbb_.AddElement(Crop::VT_AXIS, axis, 0); + } + void add_offsets(flatbuffers::Offset> offsets) { + fbb_.AddOffset(Crop::VT_OFFSETS, offsets); + } + explicit CropBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CropBuilder &operator=(const CropBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCrop( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + flatbuffers::Offset> offsets = 0) { + CropBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_offsets(offsets); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCropDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t axis = 0, + const std::vector *offsets = nullptr) { + auto offsets__ = offsets ? _fbb.CreateVector(*offsets) : 0; + return mindspore::schema::CreateCrop( + _fbb, + axis, + offsets__); +} + +struct Permute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ORDER = 4 + }; + const flatbuffers::Vector *order() const { + return GetPointer *>(VT_ORDER); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ORDER) && + verifier.VerifyVector(order()) && + verifier.EndTable(); + } +}; + +struct PermuteBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_order(flatbuffers::Offset> order) { + fbb_.AddOffset(Permute::VT_ORDER, order); + } + explicit PermuteBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PermuteBuilder &operator=(const PermuteBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePermute( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> order = 0) { + PermuteBuilder builder_(_fbb); + builder_.add_order(order); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePermuteDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *order = nullptr) { + auto order__ = order ? 
_fbb.CreateVector(*order) : 0; + return mindspore::schema::CreatePermute( + _fbb, + order__); +} + +struct Clip FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MAX = 4, + VT_MIN = 6 + }; + float max() const { + return GetField(VT_MAX, 0.0f); + } + float min() const { + return GetField(VT_MIN, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MAX) && + VerifyField(verifier, VT_MIN) && + verifier.EndTable(); + } +}; + +struct ClipBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_max(float max) { + fbb_.AddElement(Clip::VT_MAX, max, 0.0f); + } + void add_min(float min) { + fbb_.AddElement(Clip::VT_MIN, min, 0.0f); + } + explicit ClipBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ClipBuilder &operator=(const ClipBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateClip( + flatbuffers::FlatBufferBuilder &_fbb, + float max = 0.0f, + float min = 0.0f) { + ClipBuilder builder_(_fbb); + builder_.add_min(min); + builder_.add_max(max); + return builder_.Finish(); +} + +struct Constant FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ConstantBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ConstantBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ConstantBuilder &operator=(const ConstantBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset 
CreateConstant( + flatbuffers::FlatBufferBuilder &_fbb) { + ConstantBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Elu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALPHA = 4 + }; + float alpha() const { + return GetField(VT_ALPHA, 1.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); + } +}; + +struct EluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(Elu::VT_ALPHA, alpha, 1.0f); + } + explicit EluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EluBuilder &operator=(const EluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateElu( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 1.0f) { + EluBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + +struct Broadcast FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct BroadcastBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BroadcastBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BroadcastBuilder &operator=(const BroadcastBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBroadcast( + flatbuffers::FlatBufferBuilder &_fbb) { + BroadcastBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BroadcastTo FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DST_SHAPE = 4 + }; + const flatbuffers::Vector *dst_shape() const { + return GetPointer *>(VT_DST_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DST_SHAPE) && + verifier.VerifyVector(dst_shape()) && + verifier.EndTable(); + } +}; + +struct BroadcastToBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dst_shape(flatbuffers::Offset> dst_shape) { + fbb_.AddOffset(BroadcastTo::VT_DST_SHAPE, dst_shape); + } + explicit BroadcastToBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BroadcastToBuilder &operator=(const BroadcastToBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBroadcastTo( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dst_shape = 0) { + BroadcastToBuilder builder_(_fbb); + builder_.add_dst_shape(dst_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBroadcastToDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dst_shape = nullptr) { + auto dst_shape__ = dst_shape ? 
_fbb.CreateVector(*dst_shape) : 0; + return mindspore::schema::CreateBroadcastTo( + _fbb, + dst_shape__); +} + +struct Lrn FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALPHA = 4, + VT_BETA = 6, + VT_BIAS = 8, + VT_SIZE = 10 + }; + float alpha() const { + return GetField(VT_ALPHA, 0.0001f); + } + float beta() const { + return GetField(VT_BETA, 0.75f); + } + float bias() const { + return GetField(VT_BIAS, 1.0f); + } + int32_t size() const { + return GetField(VT_SIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA) && + VerifyField(verifier, VT_BETA) && + VerifyField(verifier, VT_BIAS) && + VerifyField(verifier, VT_SIZE) && + verifier.EndTable(); + } +}; + +struct LrnBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(Lrn::VT_ALPHA, alpha, 0.0001f); + } + void add_beta(float beta) { + fbb_.AddElement(Lrn::VT_BETA, beta, 0.75f); + } + void add_bias(float bias) { + fbb_.AddElement(Lrn::VT_BIAS, bias, 1.0f); + } + void add_size(int32_t size) { + fbb_.AddElement(Lrn::VT_SIZE, size, 0); + } + explicit LrnBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LrnBuilder &operator=(const LrnBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLrn( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0001f, + float beta = 0.75f, + float bias = 1.0f, + int32_t size = 0) { + LrnBuilder builder_(_fbb); + builder_.add_size(size); + builder_.add_bias(bias); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + +struct Reduce FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXES = 4, + VT_KEEPDIMS = 6, + VT_MODE = 8 + }; + const flatbuffers::Vector *axes() const { + return GetPointer *>(VT_AXES); + } + int32_t keepDims() const { + return GetField(VT_KEEPDIMS, 0); + } + ReduceMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXES) && + verifier.VerifyVector(axes()) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); + } +}; + +struct ReduceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axes(flatbuffers::Offset> axes) { + fbb_.AddOffset(Reduce::VT_AXES, axes); + } + void add_keepDims(int32_t keepDims) { + fbb_.AddElement(Reduce::VT_KEEPDIMS, keepDims, 0); + } + void add_mode(ReduceMode mode) { + fbb_.AddElement(Reduce::VT_MODE, static_cast(mode), 0); + } + explicit ReduceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReduceBuilder &operator=(const ReduceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReduce( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axes = 0, + int32_t keepDims = 0, + ReduceMode mode = ReduceMode_ReduceMean) { + ReduceBuilder builder_(_fbb); + builder_.add_keepDims(keepDims); + builder_.add_axes(axes); + builder_.add_mode(mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReduceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axes = nullptr, + int32_t keepDims = 0, + ReduceMode mode = ReduceMode_ReduceMean) { + auto axes__ = axes ? 
_fbb.CreateVector(*axes) : 0; + return mindspore::schema::CreateReduce( + _fbb, + axes__, + keepDims, + mode); +} + +struct Prelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SLOPE = 4 + }; + const flatbuffers::Vector *slope() const { + return GetPointer *>(VT_SLOPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SLOPE) && + verifier.VerifyVector(slope()) && + verifier.EndTable(); + } +}; + +struct PreluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_slope(flatbuffers::Offset> slope) { + fbb_.AddOffset(Prelu::VT_SLOPE, slope); + } + explicit PreluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PreluBuilder &operator=(const PreluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePrelu( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> slope = 0) { + PreluBuilder builder_(_fbb); + builder_.add_slope(slope); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePreluDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *slope = nullptr) { + auto slope__ = slope ? 
_fbb.CreateVector(*slope) : 0; + return mindspore::schema::CreatePrelu( + _fbb, + slope__); +} + +struct Transpose FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PERM = 4, + VT_CONJUGATE = 6 + }; + const flatbuffers::Vector *perm() const { + return GetPointer *>(VT_PERM); + } + bool conjugate() const { + return GetField(VT_CONJUGATE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_PERM) && + verifier.VerifyVector(perm()) && + VerifyField(verifier, VT_CONJUGATE) && + verifier.EndTable(); + } +}; + +struct TransposeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_perm(flatbuffers::Offset> perm) { + fbb_.AddOffset(Transpose::VT_PERM, perm); + } + void add_conjugate(bool conjugate) { + fbb_.AddElement(Transpose::VT_CONJUGATE, static_cast(conjugate), 0); + } + explicit TransposeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TransposeBuilder &operator=(const TransposeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTranspose( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> perm = 0, + bool conjugate = false) { + TransposeBuilder builder_(_fbb); + builder_.add_perm(perm); + builder_.add_conjugate(conjugate); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTransposeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *perm = nullptr, + bool conjugate = false) { + auto perm__ = perm ? 
_fbb.CreateVector(*perm) : 0; + return mindspore::schema::CreateTranspose( + _fbb, + perm__, + conjugate); +} + +struct Squeeze FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct SqueezeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Squeeze::VT_AXIS, axis); + } + explicit SqueezeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SqueezeBuilder &operator=(const SqueezeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqueeze( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + SqueezeBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSqueezeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? 
_fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateSqueeze( + _fbb, + axis__); +} + +struct Unsqueeze FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + const flatbuffers::Vector *axis() const { + return GetPointer *>(VT_AXIS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_AXIS) && + verifier.VerifyVector(axis()) && + verifier.EndTable(); + } +}; + +struct UnsqueezeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(flatbuffers::Offset> axis) { + fbb_.AddOffset(Unsqueeze::VT_AXIS, axis); + } + explicit UnsqueezeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + UnsqueezeBuilder &operator=(const UnsqueezeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsqueeze( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> axis = 0) { + UnsqueezeBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateUnsqueezeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *axis = nullptr) { + auto axis__ = axis ? 
_fbb.CreateVector(*axis) : 0; + return mindspore::schema::CreateUnsqueeze( + _fbb, + axis__); +} + +struct Upsample FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4, + VT_SCALES = 6 + }; + const flatbuffers::String *mode() const { + return GetPointer(VT_MODE); + } + const flatbuffers::Vector *scales() const { + return GetPointer *>(VT_SCALES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MODE) && + verifier.VerifyString(mode()) && + VerifyOffset(verifier, VT_SCALES) && + verifier.VerifyVector(scales()) && + verifier.EndTable(); + } +}; + +struct UpsampleBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(flatbuffers::Offset mode) { + fbb_.AddOffset(Upsample::VT_MODE, mode); + } + void add_scales(flatbuffers::Offset> scales) { + fbb_.AddOffset(Upsample::VT_SCALES, scales); + } + explicit UpsampleBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + UpsampleBuilder &operator=(const UpsampleBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUpsample( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset mode = 0, + flatbuffers::Offset> scales = 0) { + UpsampleBuilder builder_(_fbb); + builder_.add_scales(scales); + builder_.add_mode(mode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateUpsampleDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *mode = nullptr, + const std::vector *scales = nullptr) { + auto mode__ = mode ? _fbb.CreateString(mode) : 0; + auto scales__ = scales ? 
_fbb.CreateVector(*scales) : 0; + return mindspore::schema::CreateUpsample( + _fbb, + mode__, + scales__); +} + +struct Dropout FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_RATIO = 4 + }; + float ratio() const { + return GetField(VT_RATIO, 0.5f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_RATIO) && + verifier.EndTable(); + } +}; + +struct DropoutBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_ratio(float ratio) { + fbb_.AddElement(Dropout::VT_RATIO, ratio, 0.5f); + } + explicit DropoutBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DropoutBuilder &operator=(const DropoutBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDropout( + flatbuffers::FlatBufferBuilder &_fbb, + float ratio = 0.5f) { + DropoutBuilder builder_(_fbb); + builder_.add_ratio(ratio); + return builder_.Finish(); +} + +struct LocalResponseNormalization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEPTH_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t depth_radius() const { + return GetField(VT_DEPTH_RADIUS, 0); + } + float bias() const { + return GetField(VT_BIAS, 0.0f); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DEPTH_RADIUS) && + VerifyField(verifier, VT_BIAS) && + VerifyField(verifier, VT_ALPHA) && + VerifyField(verifier, VT_BETA) && + verifier.EndTable(); + } +}; + +struct 
LocalResponseNormalizationBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_depth_radius(int32_t depth_radius) { + fbb_.AddElement(LocalResponseNormalization::VT_DEPTH_RADIUS, depth_radius, 0); + } + void add_bias(float bias) { + fbb_.AddElement(LocalResponseNormalization::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) { + fbb_.AddElement(LocalResponseNormalization::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) { + fbb_.AddElement(LocalResponseNormalization::VT_BETA, beta, 0.0f); + } + explicit LocalResponseNormalizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LocalResponseNormalizationBuilder &operator=(const LocalResponseNormalizationBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLocalResponseNormalization( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t depth_radius = 0, + float bias = 0.0f, + float alpha = 0.0f, + float beta = 0.0f) { + LocalResponseNormalizationBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + builder_.add_depth_radius(depth_radius); + return builder_.Finish(); +} + +struct ZerosLike FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct ZerosLikeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ZerosLikeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ZerosLikeBuilder &operator=(const ZerosLikeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateZerosLike( + 
flatbuffers::FlatBufferBuilder &_fbb) { + ZerosLikeBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TopK FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_K = 4, + VT_SORTED = 6 + }; + int32_t k() const { + return GetField(VT_K, 0); + } + bool sorted() const { + return GetField(VT_SORTED, 1) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_K) && + VerifyField(verifier, VT_SORTED) && + verifier.EndTable(); + } +}; + +struct TopKBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_k(int32_t k) { + fbb_.AddElement(TopK::VT_K, k, 0); + } + void add_sorted(bool sorted) { + fbb_.AddElement(TopK::VT_SORTED, static_cast(sorted), 1); + } + explicit TopKBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TopKBuilder &operator=(const TopKBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTopK( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t k = 0, + bool sorted = true) { + TopKBuilder builder_(_fbb); + builder_.add_k(k); + builder_.add_sorted(sorted); + return builder_.Finish(); +} + +struct SpaceToDepth FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSIZE = 4, + VT_FORMAT = 6 + }; + int32_t blockSize() const { + return GetField(VT_BLOCKSIZE, 0); + } + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCKSIZE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct SpaceToDepthBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + void add_blockSize(int32_t blockSize) { + fbb_.AddElement(SpaceToDepth::VT_BLOCKSIZE, blockSize, 0); + } + void add_format(Format format) { + fbb_.AddElement(SpaceToDepth::VT_FORMAT, static_cast(format), 0); + } + explicit SpaceToDepthBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SpaceToDepthBuilder &operator=(const SpaceToDepthBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToDepth( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t blockSize = 0, + Format format = Format_NCHW) { + SpaceToDepthBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_blockSize(blockSize); + return builder_.Finish(); +} + +struct SpaceToBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSHAPE = 4, + VT_PADDINGS = 6 + }; + const flatbuffers::Vector *blockShape() const { + return GetPointer *>(VT_BLOCKSHAPE); + } + const flatbuffers::Vector *paddings() const { + return GetPointer *>(VT_PADDINGS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCKSHAPE) && + verifier.VerifyVector(blockShape()) && + VerifyOffset(verifier, VT_PADDINGS) && + verifier.VerifyVector(paddings()) && + verifier.EndTable(); + } +}; + +struct SpaceToBatchBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blockShape(flatbuffers::Offset> blockShape) { + fbb_.AddOffset(SpaceToBatch::VT_BLOCKSHAPE, blockShape); + } + void add_paddings(flatbuffers::Offset> paddings) { + fbb_.AddOffset(SpaceToBatch::VT_PADDINGS, paddings); + } + explicit SpaceToBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SpaceToBatchBuilder 
&operator=(const SpaceToBatchBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToBatch( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> blockShape = 0, + flatbuffers::Offset> paddings = 0) { + SpaceToBatchBuilder builder_(_fbb); + builder_.add_paddings(paddings); + builder_.add_blockShape(blockShape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSpaceToBatchDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *blockShape = nullptr, + const std::vector *paddings = nullptr) { + auto blockShape__ = blockShape ? _fbb.CreateVector(*blockShape) : 0; + auto paddings__ = paddings ? _fbb.CreateVector(*paddings) : 0; + return mindspore::schema::CreateSpaceToBatch( + _fbb, + blockShape__, + paddings__); +} + +struct SparseToDense FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUTSHAPE = 4, + VT_SPARSEVALUE = 6, + VT_DEFAULTVALUE = 8, + VT_VALIDATEINDICES = 10 + }; + const flatbuffers::Vector *outputShape() const { + return GetPointer *>(VT_OUTPUTSHAPE); + } + const flatbuffers::Vector *sparseValue() const { + return GetPointer *>(VT_SPARSEVALUE); + } + const flatbuffers::Vector *defaultValue() const { + return GetPointer *>(VT_DEFAULTVALUE); + } + bool validateIndices() const { + return GetField(VT_VALIDATEINDICES, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OUTPUTSHAPE) && + verifier.VerifyVector(outputShape()) && + VerifyOffset(verifier, VT_SPARSEVALUE) && + verifier.VerifyVector(sparseValue()) && + VerifyOffset(verifier, VT_DEFAULTVALUE) && + verifier.VerifyVector(defaultValue()) && + VerifyField(verifier, VT_VALIDATEINDICES) && + verifier.EndTable(); + } +}; + +struct SparseToDenseBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_outputShape(flatbuffers::Offset> outputShape) { + fbb_.AddOffset(SparseToDense::VT_OUTPUTSHAPE, outputShape); + } + void add_sparseValue(flatbuffers::Offset> sparseValue) { + fbb_.AddOffset(SparseToDense::VT_SPARSEVALUE, sparseValue); + } + void add_defaultValue(flatbuffers::Offset> defaultValue) { + fbb_.AddOffset(SparseToDense::VT_DEFAULTVALUE, defaultValue); + } + void add_validateIndices(bool validateIndices) { + fbb_.AddElement(SparseToDense::VT_VALIDATEINDICES, static_cast(validateIndices), 0); + } + explicit SparseToDenseBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SparseToDenseBuilder &operator=(const SparseToDenseBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSparseToDense( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> outputShape = 0, + flatbuffers::Offset> sparseValue = 0, + flatbuffers::Offset> defaultValue = 0, + bool validateIndices = false) { + SparseToDenseBuilder builder_(_fbb); + builder_.add_defaultValue(defaultValue); + builder_.add_sparseValue(sparseValue); + builder_.add_outputShape(outputShape); + builder_.add_validateIndices(validateIndices); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSparseToDenseDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *outputShape = nullptr, + const std::vector *sparseValue = nullptr, + const std::vector *defaultValue = nullptr, + bool validateIndices = false) { + auto outputShape__ = outputShape ? _fbb.CreateVector(*outputShape) : 0; + auto sparseValue__ = sparseValue ? _fbb.CreateVector(*sparseValue) : 0; + auto defaultValue__ = defaultValue ? 
_fbb.CreateVector(*defaultValue) : 0; + return mindspore::schema::CreateSparseToDense( + _fbb, + outputShape__, + sparseValue__, + defaultValue__, + validateIndices); +} + +struct ReverseSequence FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SEQAXIS = 4, + VT_BATCHAXIS = 6, + VT_SEQLENGTHS = 8 + }; + int32_t seqAxis() const { + return GetField(VT_SEQAXIS, 0); + } + int32_t batchAxis() const { + return GetField(VT_BATCHAXIS, 0); + } + const flatbuffers::Vector *seqLengths() const { + return GetPointer *>(VT_SEQLENGTHS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SEQAXIS) && + VerifyField(verifier, VT_BATCHAXIS) && + VerifyOffset(verifier, VT_SEQLENGTHS) && + verifier.VerifyVector(seqLengths()) && + verifier.EndTable(); + } +}; + +struct ReverseSequenceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seqAxis(int32_t seqAxis) { + fbb_.AddElement(ReverseSequence::VT_SEQAXIS, seqAxis, 0); + } + void add_batchAxis(int32_t batchAxis) { + fbb_.AddElement(ReverseSequence::VT_BATCHAXIS, batchAxis, 0); + } + void add_seqLengths(flatbuffers::Offset> seqLengths) { + fbb_.AddOffset(ReverseSequence::VT_SEQLENGTHS, seqLengths); + } + explicit ReverseSequenceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReverseSequenceBuilder &operator=(const ReverseSequenceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseSequence( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t seqAxis = 0, + int32_t batchAxis = 0, + flatbuffers::Offset> seqLengths = 0) { + ReverseSequenceBuilder builder_(_fbb); + builder_.add_seqLengths(seqLengths); + builder_.add_batchAxis(batchAxis); + builder_.add_seqAxis(seqAxis); 
+ return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReverseSequenceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t seqAxis = 0, + int32_t batchAxis = 0, + const std::vector *seqLengths = nullptr) { + auto seqLengths__ = seqLengths ? _fbb.CreateVector(*seqLengths) : 0; + return mindspore::schema::CreateReverseSequence( + _fbb, + seqAxis, + batchAxis, + seqLengths__); +} + +struct Rank FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } +}; + +struct RankBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RankBuilder &operator=(const RankBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRank( + flatbuffers::FlatBufferBuilder &_fbb) { + RankBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct Gather FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_BATCHDIMS = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t batchDims() const { + return GetField(VT_BATCHDIMS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_BATCHDIMS) && + verifier.EndTable(); + } +}; + +struct GatherBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Gather::VT_AXIS, axis, 0); + } + void add_batchDims(int32_t batchDims) { + fbb_.AddElement(Gather::VT_BATCHDIMS, batchDims, 0); + } + explicit GatherBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + 
start_ = fbb_.StartTable(); + } + GatherBuilder &operator=(const GatherBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGather( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t batchDims = 0) { + GatherBuilder builder_(_fbb); + builder_.add_batchDims(batchDims); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct GatherNd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BATCHDIMS = 4 + }; + int32_t batchDims() const { + return GetField(VT_BATCHDIMS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BATCHDIMS) && + verifier.EndTable(); + } +}; + +struct GatherNdBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_batchDims(int32_t batchDims) { + fbb_.AddElement(GatherNd::VT_BATCHDIMS, batchDims, 0); + } + explicit GatherNdBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GatherNdBuilder &operator=(const GatherNdBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGatherNd( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t batchDims = 0) { + GatherNdBuilder builder_(_fbb); + builder_.add_batchDims(batchDims); + return builder_.Finish(); +} + +struct Fill FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) 
&& + verifier.EndTable(); + } +}; + +struct FillBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Fill::VT_DIMS, dims); + } + explicit FillBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FillBuilder &operator=(const FillBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFill( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0) { + FillBuilder builder_(_fbb); + builder_.add_dims(dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateFillDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + return mindspore::schema::CreateFill( + _fbb, + dims__); +} + +struct DepthToSpace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSIZE = 4, + VT_FORMAT = 6 + }; + int32_t blockSize() const { + return GetField(VT_BLOCKSIZE, 0); + } + Format format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCKSIZE) && + VerifyField(verifier, VT_FORMAT) && + verifier.EndTable(); + } +}; + +struct DepthToSpaceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blockSize(int32_t blockSize) { + fbb_.AddElement(DepthToSpace::VT_BLOCKSIZE, blockSize, 0); + } + void add_format(Format format) { + fbb_.AddElement(DepthToSpace::VT_FORMAT, static_cast(format), 0); + } + explicit DepthToSpaceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DepthToSpaceBuilder &operator=(const DepthToSpaceBuilder &); + 
flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthToSpace( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t blockSize = 0, + Format format = Format_NCHW) { + DepthToSpaceBuilder builder_(_fbb); + builder_.add_format(format); + builder_.add_blockSize(blockSize); + return builder_.Finish(); +} + +struct BatchToSpace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSHAPE = 4, + VT_CROPS = 6 + }; + const flatbuffers::Vector *blockShape() const { + return GetPointer *>(VT_BLOCKSHAPE); + } + const flatbuffers::Vector *crops() const { + return GetPointer *>(VT_CROPS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOCKSHAPE) && + verifier.VerifyVector(blockShape()) && + VerifyOffset(verifier, VT_CROPS) && + verifier.VerifyVector(crops()) && + verifier.EndTable(); + } +}; + +struct BatchToSpaceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blockShape(flatbuffers::Offset> blockShape) { + fbb_.AddOffset(BatchToSpace::VT_BLOCKSHAPE, blockShape); + } + void add_crops(flatbuffers::Offset> crops) { + fbb_.AddOffset(BatchToSpace::VT_CROPS, crops); + } + explicit BatchToSpaceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BatchToSpaceBuilder &operator=(const BatchToSpaceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchToSpace( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> blockShape = 0, + flatbuffers::Offset> crops = 0) { + BatchToSpaceBuilder builder_(_fbb); + builder_.add_crops(crops); + builder_.add_blockShape(blockShape); + return builder_.Finish(); 
+}
+
+inline flatbuffers::Offset<BatchToSpace> CreateBatchToSpaceDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int32_t> *blockShape = nullptr,
+    const std::vector<int32_t> *crops = nullptr) {
+  auto blockShape__ = blockShape ? _fbb.CreateVector<int32_t>(*blockShape) : 0;
+  auto crops__ = crops ? _fbb.CreateVector<int32_t>(*crops) : 0;
+  return mindspore::schema::CreateBatchToSpace(
+      _fbb,
+      blockShape__,
+      crops__);
+}
+
+struct AddN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_N = 4
+  };
+  int32_t N() const {
+    return GetField<int32_t>(VT_N, 0);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int32_t>(verifier, VT_N) &&
+           verifier.EndTable();
+  }
+};
+
+struct AddNBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_N(int32_t N) {
+    fbb_.AddElement<int32_t>(AddN::VT_N, N, 0);
+  }
+  explicit AddNBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  AddNBuilder &operator=(const AddNBuilder &);
+  flatbuffers::Offset<AddN> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<AddN>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<AddN> CreateAddN(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    int32_t N = 0) {
+  AddNBuilder builder_(_fbb);
+  builder_.add_N(N);
+  return builder_.Finish();
+}
+
+struct EmbeddingLookup FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_IDS = 4,
+    VT_MAXNORM = 6
+  };
+  const flatbuffers::Vector<int32_t> *ids() const {
+    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_IDS);
+  }
+  float maxNorm() const {
+    return GetField<float>(VT_MAXNORM, 0.0f);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_IDS) &&
+           verifier.VerifyVector(ids()) &&
+           VerifyField<float>(verifier, VT_MAXNORM) &&
+           verifier.EndTable();
+  }
+};
+
+struct EmbeddingLookupBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> ids) {
+    fbb_.AddOffset(EmbeddingLookup::VT_IDS, ids);
+  }
+  void add_maxNorm(float maxNorm) {
+    fbb_.AddElement<float>(EmbeddingLookup::VT_MAXNORM, maxNorm, 0.0f);
+  }
+  explicit EmbeddingLookupBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  EmbeddingLookupBuilder &operator=(const EmbeddingLookupBuilder &);
+  flatbuffers::Offset<EmbeddingLookup> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<EmbeddingLookup>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<EmbeddingLookup> CreateEmbeddingLookup(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<int32_t>> ids = 0,
+    float maxNorm = 0.0f) {
+  EmbeddingLookupBuilder builder_(_fbb);
+  builder_.add_maxNorm(maxNorm);
+  builder_.add_ids(ids);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<EmbeddingLookup> CreateEmbeddingLookupDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int32_t> *ids = nullptr,
+    float maxNorm = 0.0f) {
+  auto ids__ = ids ? _fbb.CreateVector<int32_t>(*ids) : 0;
+  return mindspore::schema::CreateEmbeddingLookup(
+      _fbb,
+      ids__,
+      maxNorm);
+}
+
+struct EmbeddingLookupSparse FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_SPIDS = 4,
+    VT_SPWEIGHTS = 6,
+    VT_MAXNORTM = 8
+  };
+  const flatbuffers::Vector<int32_t> *spIds() const {
+    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SPIDS);
+  }
+  const flatbuffers::Vector<float> *spWeights() const {
+    return GetPointer<const flatbuffers::Vector<float> *>(VT_SPWEIGHTS);
+  }
+  float maxNortm() const {
+    return GetField<float>(VT_MAXNORTM, 0.0f);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_SPIDS) &&
+           verifier.VerifyVector(spIds()) &&
+           VerifyOffset(verifier, VT_SPWEIGHTS) &&
+           verifier.VerifyVector(spWeights()) &&
+           VerifyField<float>(verifier, VT_MAXNORTM) &&
+           verifier.EndTable();
+  }
+};
+
+struct EmbeddingLookupSparseBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_spIds(flatbuffers::Offset<flatbuffers::Vector<int32_t>> spIds) {
+    fbb_.AddOffset(EmbeddingLookupSparse::VT_SPIDS, spIds);
+  }
+  void add_spWeights(flatbuffers::Offset<flatbuffers::Vector<float>> spWeights) {
+    fbb_.AddOffset(EmbeddingLookupSparse::VT_SPWEIGHTS, spWeights);
+  }
+  void add_maxNortm(float maxNortm) {
+    fbb_.AddElement<float>(EmbeddingLookupSparse::VT_MAXNORTM, maxNortm, 0.0f);
+  }
+  explicit EmbeddingLookupSparseBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  EmbeddingLookupSparseBuilder &operator=(const EmbeddingLookupSparseBuilder &);
+  flatbuffers::Offset<EmbeddingLookupSparse> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<EmbeddingLookupSparse>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<EmbeddingLookupSparse> CreateEmbeddingLookupSparse(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<int32_t>> spIds = 0,
+    flatbuffers::Offset<flatbuffers::Vector<float>> spWeights = 0,
+    float maxNortm = 0.0f) {
+  EmbeddingLookupSparseBuilder builder_(_fbb);
+  builder_.add_maxNortm(maxNortm);
+  builder_.add_spWeights(spWeights);
+  builder_.add_spIds(spIds);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<EmbeddingLookupSparse> CreateEmbeddingLookupSparseDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int32_t> *spIds = nullptr,
+    const std::vector<float> *spWeights = nullptr,
+    float maxNortm = 0.0f) {
+  auto spIds__ = spIds ? _fbb.CreateVector<int32_t>(*spIds) : 0;
+  auto spWeights__ = spWeights ? _fbb.CreateVector<float>(*spWeights) : 0;
+  return mindspore::schema::CreateEmbeddingLookupSparse(
+      _fbb,
+      spIds__,
+      spWeights__,
+      maxNortm);
+}
+
+struct FloorDiv FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct FloorDivBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit FloorDivBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  FloorDivBuilder &operator=(const FloorDivBuilder &);
+  flatbuffers::Offset<FloorDiv> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<FloorDiv>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<FloorDiv> CreateFloorDiv(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  FloorDivBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct FloorMod FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct FloorModBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit FloorModBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  FloorModBuilder &operator=(const FloorModBuilder &);
+  flatbuffers::Offset<FloorMod> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<FloorMod>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<FloorMod> CreateFloorMod(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  FloorModBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct L2Norm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_AXIS = 4,
+    VT_EPSILON = 6
+  };
+  const flatbuffers::Vector<int32_t> *axis() const {
+    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_AXIS);
+  }
+  float epsilon() const {
+    return GetField<float>(VT_EPSILON, 0.0f);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_AXIS) &&
+           verifier.VerifyVector(axis()) &&
+           VerifyField<float>(verifier, VT_EPSILON) &&
+           verifier.EndTable();
+  }
+};
+
+struct L2NormBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_axis(flatbuffers::Offset<flatbuffers::Vector<int32_t>> axis) {
+    fbb_.AddOffset(L2Norm::VT_AXIS, axis);
+  }
+  void add_epsilon(float epsilon) {
+    fbb_.AddElement<float>(L2Norm::VT_EPSILON, epsilon, 0.0f);
+  }
+  explicit L2NormBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  L2NormBuilder &operator=(const L2NormBuilder &);
+  flatbuffers::Offset<L2Norm> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<L2Norm>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<L2Norm> CreateL2Norm(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<int32_t>> axis = 0,
+    float epsilon = 0.0f) {
+  L2NormBuilder builder_(_fbb);
+  builder_.add_epsilon(epsilon);
+  builder_.add_axis(axis);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<L2Norm> CreateL2NormDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<int32_t> *axis = nullptr,
+    float epsilon = 0.0f) {
+  auto axis__ = axis ? _fbb.CreateVector<int32_t>(*axis) : 0;
+  return mindspore::schema::CreateL2Norm(
+      _fbb,
+      axis__,
+      epsilon);
+}
+
+struct LogicalAnd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct LogicalAndBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit LogicalAndBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  LogicalAndBuilder &operator=(const LogicalAndBuilder &);
+  flatbuffers::Offset<LogicalAnd> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<LogicalAnd>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<LogicalAnd> CreateLogicalAnd(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  LogicalAndBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct LogicalOr FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct LogicalOrBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit LogicalOrBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  LogicalOrBuilder &operator=(const LogicalOrBuilder &);
+  flatbuffers::Offset<LogicalOr> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<LogicalOr>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<LogicalOr> CreateLogicalOr(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  LogicalOrBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct LogicalXor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct LogicalXorBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit LogicalXorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  LogicalXorBuilder &operator=(const LogicalXorBuilder &);
+  flatbuffers::Offset<LogicalXor> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<LogicalXor>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<LogicalXor> CreateLogicalXor(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  LogicalXorBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct LogicalNot FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct LogicalNotBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit LogicalNotBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  LogicalNotBuilder &operator=(const LogicalNotBuilder &);
+  flatbuffers::Offset<LogicalNot> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<LogicalNot>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<LogicalNot> CreateLogicalNot(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  LogicalNotBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct MatrixDiag FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_K = 4,
+    VT_NUMROWS = 6,
+    VT_NUMCOLS = 8,
+    VT_PADDINGVALUE = 10
+  };
+  int32_t k() const {
+    return GetField<int32_t>(VT_K, 0);
+  }
+  int32_t numRows() const {
+    return GetField<int32_t>(VT_NUMROWS, 0);
+  }
+  int32_t numCols() const {
+    return GetField<int32_t>(VT_NUMCOLS, 0);
+  }
+  float paddingValue() const {
+    return GetField<float>(VT_PADDINGVALUE, 0.0f);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int32_t>(verifier, VT_K) &&
+           VerifyField<int32_t>(verifier, VT_NUMROWS) &&
+           VerifyField<int32_t>(verifier, VT_NUMCOLS) &&
+           VerifyField<float>(verifier, VT_PADDINGVALUE) &&
+           verifier.EndTable();
+  }
+};
+
+struct MatrixDiagBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_k(int32_t k) {
+    fbb_.AddElement<int32_t>(MatrixDiag::VT_K, k, 0);
+  }
+  void add_numRows(int32_t numRows) {
+    fbb_.AddElement<int32_t>(MatrixDiag::VT_NUMROWS, numRows, 0);
+  }
+  void add_numCols(int32_t numCols) {
+    fbb_.AddElement<int32_t>(MatrixDiag::VT_NUMCOLS, numCols, 0);
+  }
+  void add_paddingValue(float paddingValue) {
+    fbb_.AddElement<float>(MatrixDiag::VT_PADDINGVALUE, paddingValue, 0.0f);
+  }
+  explicit MatrixDiagBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  MatrixDiagBuilder &operator=(const MatrixDiagBuilder &);
+  flatbuffers::Offset<MatrixDiag> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<MatrixDiag>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<MatrixDiag> CreateMatrixDiag(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    int32_t k = 0,
+    int32_t numRows = 0,
+    int32_t numCols = 0,
+    float paddingValue = 0.0f) {
+  MatrixDiagBuilder builder_(_fbb);
+  builder_.add_paddingValue(paddingValue);
+  builder_.add_numCols(numCols);
+  builder_.add_numRows(numRows);
+  builder_.add_k(k);
+  return builder_.Finish();
+}
+
+struct Select FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+};
+
+struct SelectBuilder {
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit SelectBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  SelectBuilder &operator=(const SelectBuilder &);
+  flatbuffers::Offset<Select> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<Select>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset