Commit 680935cc authored by 朔-望

Initial commit

#!/bin/bash
set -e
readonly VERSION="3.8"
version=$(clang-format -version)
if ! [[ $version == *"$VERSION"* ]]; then
echo "clang-format version check failed."
echo "a version contains '$VERSION' is needed, but get '$version'"
echo "you can install the right version, and make an soft-link to '\$PATH' env"
exit -1
fi
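# Usage sketch: pre-commit invokes this wrapper through the local hook in
# .pre-commit-config.yaml, but it can also be run by hand (the path below is
# illustrative):
#   bash .clang_format.hook -i src/framework/variant.h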
clang-format "$@"
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
.DS_Store
build/
.idea/
CMakeCache.txt
CMakeFiles/
Makefile
cmake_install.cmake
*.cbp
paddle-mobile.cbp
.idea
exclude: 'third-party'
repos:
- repo: https://github.com/Lucas-C/pre-commit-hooks.git
sha: v1.0.1
hooks:
- id: remove-crlf
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- id: remove-tabs
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0
hooks:
- id: check-added-large-files
- id: check-merge-conflict
- id: check-symlinks
- id: detect-private-key
files: (?!.*tar.gz)^.*$
- id: end-of-file-fixer
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- id: trailing-whitespace
files: (src).*\.(md|py|mm|swift|java|c|cc|cxx|cpp|cu|h|hpp|hxx)$
- repo: local
hooks:
- id: clang-format-with-version-check
name: clang-format
description: Format files with ClangFormat.
entry: bash .clang_format.hook -i
language: system
files: (src).*\.(c|cc|cxx|cpp|h|hpp|hxx)$
language: cpp
cache: ccache
sudo: required
dist: trusty
os:
- linux
env:
- JOB=check_style
addons:
apt:
packages:
- git
- python
- python-pip
- python2.7-dev
- clang-format-3.8
before_install:
- sudo pip install -U virtualenv pre-commit pip
script:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
- |
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
- |
timeout 600 .travis/${JOB}.sh # 10min timeout
RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else exit 1; fi;
notifications:
email:
on_success: change
on_failure: always
#!/bin/bash
function abort(){
echo "Your change doesn't follow PaddlePaddle's code style" 1>&2
echo "Please use pre-commit to auto-format your code." 1>&2
exit 1
}
trap 'abort' 0
set -e
cd "$(dirname "$0")"
cd ..
export PATH=/usr/bin:$PATH
pre-commit install
clang-format --version
if ! pre-commit run -a ; then
ls -lh
git diff --exit-code
exit 1
fi
trap : 0
cmake_minimum_required(VERSION 3.0)
project(paddle-mobile)
add_definitions(-std=c++11)
add_definitions(-DPADDLE_MOBILE_DEBUG="true")
set(CMAKE_BUILD_TYPE RelWithDebInfo)
set(CMAKE_VERBOSE_MAKEFILE on)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY build)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY build)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY build)
file(GLOB_RECURSE PADDLE_MOBILE_CC src/*.cc src/*.cpp)
file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h)
# include headers
include_directories(src/)
include(ExternalProject)
ExternalProject_Add(openblas_proj
GIT_REPOSITORY "https://github.com/xianyi/OpenBLAS.git"
GIT_TAG "v0.2.20"
SOURCE_DIR "openblas/"
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ""
BUILD_COMMAND "make" "ONLY_CBLAS=1"
INSTALL_COMMAND "make" "PREFIX=${CMAKE_BINARY_DIR}/" "install"
)
set_target_properties(openblas_proj PROPERTIES EXCLUDE_FROM_ALL 1)
# link openblas
include_directories(${CMAKE_BINARY_DIR}/openblas)
link_directories(${CMAKE_BINARY_DIR}/lib)
# link protobuf
include_directories(third-party/protobuf/include)
link_directories(third-party/protobuf/lib)
# gen so
ADD_LIBRARY(paddle-mobile SHARED ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
target_link_libraries(paddle-mobile protobuf-lite openblas)
add_dependencies(paddle-mobile openblas_proj)
# gen static
ADD_LIBRARY(paddle-mobile-static STATIC ${PADDLE_MOBILE_CC} ${PADDLE_MOBILE_H})
target_link_libraries(paddle-mobile-static protobuf-lite openblas)
add_dependencies(paddle-mobile-static openblas_proj)
# gen test
ADD_EXECUTABLE(paddle-mobile-test test/main.cpp test/test_helper.h)
target_link_libraries(paddle-mobile-test paddle-mobile)
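# Build sketch (an assumed workflow, mirroring build.sh): configure out of
# source and build, e.g.
#   cmake -H. -Bbuild/release/x86 -DCMAKE_BUILD_TYPE=Release
#   cmake --build build/release/x86 -- -j8
# This yields libpaddle-mobile.so, libpaddle-mobile-static.a and the
# paddle-mobile-test executable in the configured output directories.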
# Paddle-Mobile
This folder is used to develop the next major version.
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configurable variables.
# Modeled after the ndk-build system.
# For any variables defined in:
# https://developer.android.com/ndk/guides/android_mk.html
# https://developer.android.com/ndk/guides/application_mk.html
# if it makes sense for CMake, then replace LOCAL, APP, or NDK with ANDROID, and
# we have that variable below.
# The exception is ANDROID_TOOLCHAIN vs NDK_TOOLCHAIN_VERSION.
# Since we only have one version of each gcc and clang, specifying a version
# doesn't make much sense.
#
# ANDROID_TOOLCHAIN
# ANDROID_ABI
# ANDROID_PLATFORM
# ANDROID_STL
# ANDROID_PIE
# ANDROID_CPP_FEATURES
# ANDROID_ALLOW_UNDEFINED_SYMBOLS
# ANDROID_ARM_MODE
# ANDROID_ARM_NEON
# ANDROID_DISABLE_NO_EXECUTE
# ANDROID_DISABLE_RELRO
# ANDROID_DISABLE_FORMAT_STRING_CHECKS
# ANDROID_CCACHE
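# For example, a typical invocation of this toolchain file is sketched below
# (ABI, platform, and STL values are illustrative):
#   cmake -DCMAKE_TOOLCHAIN_FILE=android-cmake/android.toolchain.cmake \
#         -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-21 \
#         -DANDROID_STL=c++_static ..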
cmake_minimum_required(VERSION 3.6.0)
# Inhibit all of CMake's own NDK handling code.
set(CMAKE_SYSTEM_VERSION 1)
# CMake invokes the toolchain file twice during the first build, but only once
# during subsequent rebuilds. This was causing the various flags to be added
# twice on the first build, and on a rebuild ninja would see only one set of the
# flags and rebuild the world.
# https://github.com/android-ndk/ndk/issues/323
if(ANDROID_NDK_TOOLCHAIN_INCLUDED)
return()
endif(ANDROID_NDK_TOOLCHAIN_INCLUDED)
set(ANDROID_NDK_TOOLCHAIN_INCLUDED true)
# Android NDK
if(NOT ANDROID_NDK)
get_filename_component(ANDROID_NDK "$ENV{NDK_ROOT}" ABSOLUTE)
else()
# Allow the user to specify their own NDK path, but emit a warning. This is an
# uncommon use case, but helpful if users want to use a bleeding edge
# toolchain file with a stable NDK.
# https://github.com/android-ndk/ndk/issues/473
message(WARNING "Using custom NDK path (ANDROID_NDK is set): ${ANDROID_NDK}")
endif()
file(TO_CMAKE_PATH "${ANDROID_NDK}" ANDROID_NDK)
# Android NDK revision
file(READ "${ANDROID_NDK}/source.properties" ANDROID_NDK_SOURCE_PROPERTIES)
set(ANDROID_NDK_SOURCE_PROPERTIES_REGEX
"^Pkg\\.Desc = Android NDK\nPkg\\.Revision = ([0-9]+)\\.")
if(NOT ANDROID_NDK_SOURCE_PROPERTIES MATCHES "${ANDROID_NDK_SOURCE_PROPERTIES_REGEX}")
message(SEND_ERROR "Failed to parse Android NDK revision: ${ANDROID_NDK}/source.properties.\n${ANDROID_NDK_SOURCE_PROPERTIES}")
endif()
string(REGEX REPLACE "${ANDROID_NDK_SOURCE_PROPERTIES_REGEX}" "\\1"
ANDROID_NDK_REVISION "${ANDROID_NDK_SOURCE_PROPERTIES}")
# Touch toolchain variable to suppress "unused variable" warning.
# This happens if CMake is invoked with the same command line the second time.
if(CMAKE_TOOLCHAIN_FILE)
endif()
# Compatibility for configurable variables.
# Compatible with configurable variables from the other toolchain file:
# https://github.com/taka-no-me/android-cmake
# TODO: We should consider dropping compatibility to simplify things once most
# of our users have migrated to our standard set of configurable variables.
if(ANDROID_TOOLCHAIN_NAME AND NOT ANDROID_TOOLCHAIN)
if(ANDROID_TOOLCHAIN_NAME MATCHES "-clang([0-9].[0-9])?$")
set(ANDROID_TOOLCHAIN clang)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "-[0-9].[0-9]$")
set(ANDROID_TOOLCHAIN gcc)
endif()
endif()
if(ANDROID_ABI STREQUAL "armeabi-v7a with NEON")
set(ANDROID_ABI armeabi-v7a)
set(ANDROID_ARM_NEON TRUE)
elseif(ANDROID_TOOLCHAIN_NAME AND NOT ANDROID_ABI)
if(ANDROID_TOOLCHAIN_NAME MATCHES "^arm-linux-androideabi-")
set(ANDROID_ABI armeabi-v7a)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "^aarch64-linux-android-")
set(ANDROID_ABI arm64-v8a)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "^x86-")
set(ANDROID_ABI x86)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "^x86_64-")
set(ANDROID_ABI x86_64)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "^mipsel-linux-android-")
set(ANDROID_ABI mips)
elseif(ANDROID_TOOLCHAIN_NAME MATCHES "^mips64el-linux-android-")
set(ANDROID_ABI mips64)
endif()
endif()
if(ANDROID_NATIVE_API_LEVEL AND NOT ANDROID_PLATFORM)
if(ANDROID_NATIVE_API_LEVEL MATCHES "^android-[0-9]+$")
set(ANDROID_PLATFORM ${ANDROID_NATIVE_API_LEVEL})
elseif(ANDROID_NATIVE_API_LEVEL MATCHES "^[0-9]+$")
set(ANDROID_PLATFORM android-${ANDROID_NATIVE_API_LEVEL})
endif()
endif()
if(DEFINED ANDROID_APP_PIE AND NOT DEFINED ANDROID_PIE)
set(ANDROID_PIE "${ANDROID_APP_PIE}")
endif()
if(ANDROID_STL_FORCE_FEATURES AND NOT DEFINED ANDROID_CPP_FEATURES)
set(ANDROID_CPP_FEATURES "rtti exceptions")
endif()
if(DEFINED ANDROID_NO_UNDEFINED AND NOT DEFINED ANDROID_ALLOW_UNDEFINED_SYMBOLS)
if(ANDROID_NO_UNDEFINED)
set(ANDROID_ALLOW_UNDEFINED_SYMBOLS FALSE)
else()
set(ANDROID_ALLOW_UNDEFINED_SYMBOLS TRUE)
endif()
endif()
if(DEFINED ANDROID_SO_UNDEFINED AND NOT DEFINED ANDROID_ALLOW_UNDEFINED_SYMBOLS)
set(ANDROID_ALLOW_UNDEFINED_SYMBOLS "${ANDROID_SO_UNDEFINED}")
endif()
if(DEFINED ANDROID_FORCE_ARM_BUILD AND NOT ANDROID_ARM_MODE)
if(ANDROID_FORCE_ARM_BUILD)
set(ANDROID_ARM_MODE arm)
else()
set(ANDROID_ARM_MODE thumb)
endif()
endif()
if(DEFINED ANDROID_NOEXECSTACK AND NOT DEFINED ANDROID_DISABLE_NO_EXECUTE)
if(ANDROID_NOEXECSTACK)
set(ANDROID_DISABLE_NO_EXECUTE FALSE)
else()
set(ANDROID_DISABLE_NO_EXECUTE TRUE)
endif()
endif()
if(DEFINED ANDROID_RELRO AND NOT DEFINED ANDROID_DISABLE_RELRO)
if(ANDROID_RELRO)
set(ANDROID_DISABLE_RELRO FALSE)
else()
set(ANDROID_DISABLE_RELRO TRUE)
endif()
endif()
if(NDK_CCACHE AND NOT ANDROID_CCACHE)
set(ANDROID_CCACHE "${NDK_CCACHE}")
endif()
# Default values for configurable variables.
if(NOT ANDROID_TOOLCHAIN)
set(ANDROID_TOOLCHAIN clang)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI armeabi-v7a)
endif()
if(ANDROID_PLATFORM MATCHES "^android-([0-9]|1[0-3])$")
set(ANDROID_PLATFORM android-14)
elseif(ANDROID_PLATFORM STREQUAL android-20)
set(ANDROID_PLATFORM android-19)
elseif(ANDROID_PLATFORM STREQUAL android-25)
set(ANDROID_PLATFORM android-24)
elseif(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-14)
endif()
string(REPLACE "android-" "" ANDROID_PLATFORM_LEVEL ${ANDROID_PLATFORM})
if(ANDROID_ABI MATCHES "64(-v8a)?$" AND ANDROID_PLATFORM_LEVEL LESS 21)
set(ANDROID_PLATFORM android-21)
set(ANDROID_PLATFORM_LEVEL 21)
endif()
if(NOT ANDROID_STL)
set(ANDROID_STL gnustl_static)
endif()
if(NOT DEFINED ANDROID_PIE)
if(ANDROID_PLATFORM_LEVEL LESS 16)
set(ANDROID_PIE FALSE)
else()
set(ANDROID_PIE TRUE)
endif()
endif()
if(NOT ANDROID_ARM_MODE)
set(ANDROID_ARM_MODE thumb)
endif()
# Export configurable variables for the try_compile() command.
set(CMAKE_TRY_COMPILE_PLATFORM_VARIABLES
ANDROID_TOOLCHAIN
ANDROID_ABI
ANDROID_PLATFORM
ANDROID_STL
ANDROID_PIE
ANDROID_CPP_FEATURES
ANDROID_ALLOW_UNDEFINED_SYMBOLS
ANDROID_ARM_MODE
ANDROID_ARM_NEON
ANDROID_DISABLE_NO_EXECUTE
ANDROID_DISABLE_RELRO
ANDROID_DISABLE_FORMAT_STRING_CHECKS
ANDROID_CCACHE)
# Standard cross-compiling stuff.
set(ANDROID TRUE)
set(CMAKE_SYSTEM_NAME Android)
# Allow users to override these values in case they want more strict behaviors.
# For example, they may want to prevent the NDK's libz from being picked up so
# they can use their own.
# https://github.com/android-ndk/ndk/issues/517
if(NOT CMAKE_FIND_ROOT_PATH_MODE_PROGRAM)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
endif()
if(NOT CMAKE_FIND_ROOT_PATH_MODE_LIBRARY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
endif()
if(NOT CMAKE_FIND_ROOT_PATH_MODE_INCLUDE)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
endif()
if(NOT CMAKE_FIND_ROOT_PATH_MODE_PACKAGE)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
endif()
# ABI.
set(CMAKE_ANDROID_ARCH_ABI ${ANDROID_ABI})
if(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
set(ANDROID_SYSROOT_ABI arm)
set(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_TOOLCHAIN_NAME})
set(ANDROID_HEADER_TRIPLE arm-linux-androideabi)
if(ANDROID_ABI STREQUAL armeabi)
message(WARNING "armeabi is deprecated and will be removed in a future NDK "
"release.")
set(CMAKE_SYSTEM_PROCESSOR armv5te)
set(ANDROID_LLVM_TRIPLE armv5te-none-linux-androideabi)
elseif(ANDROID_ABI STREQUAL armeabi-v7a)
set(CMAKE_SYSTEM_PROCESSOR armv7-a)
set(ANDROID_LLVM_TRIPLE armv7-none-linux-androideabi)
endif()
elseif(ANDROID_ABI STREQUAL arm64-v8a)
set(ANDROID_SYSROOT_ABI arm64)
set(CMAKE_SYSTEM_PROCESSOR aarch64)
set(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_TOOLCHAIN_NAME})
set(ANDROID_LLVM_TRIPLE aarch64-none-linux-android)
set(ANDROID_HEADER_TRIPLE aarch64-linux-android)
elseif(ANDROID_ABI STREQUAL x86)
set(ANDROID_SYSROOT_ABI x86)
set(CMAKE_SYSTEM_PROCESSOR i686)
set(ANDROID_TOOLCHAIN_NAME i686-linux-android)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_ABI})
set(ANDROID_LLVM_TRIPLE i686-none-linux-android)
set(ANDROID_HEADER_TRIPLE i686-linux-android)
elseif(ANDROID_ABI STREQUAL x86_64)
set(ANDROID_SYSROOT_ABI x86_64)
set(CMAKE_SYSTEM_PROCESSOR x86_64)
set(ANDROID_TOOLCHAIN_NAME x86_64-linux-android)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_ABI})
set(ANDROID_LLVM_TRIPLE x86_64-none-linux-android)
set(ANDROID_HEADER_TRIPLE x86_64-linux-android)
elseif(ANDROID_ABI STREQUAL mips)
message(WARNING "mips is deprecated and will be removed in a future NDK "
"release.")
set(ANDROID_SYSROOT_ABI mips)
set(CMAKE_SYSTEM_PROCESSOR mips)
set(ANDROID_TOOLCHAIN_NAME mips64el-linux-android)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_TOOLCHAIN_NAME})
set(ANDROID_LLVM_TRIPLE mipsel-none-linux-android)
set(ANDROID_HEADER_TRIPLE mipsel-linux-android)
elseif(ANDROID_ABI STREQUAL mips64)
message(WARNING "mips64 is deprecated and will be removed in a future NDK "
"release.")
set(ANDROID_SYSROOT_ABI mips64)
set(CMAKE_SYSTEM_PROCESSOR mips64)
set(ANDROID_TOOLCHAIN_NAME mips64el-linux-android)
set(ANDROID_TOOLCHAIN_ROOT ${ANDROID_TOOLCHAIN_NAME})
set(ANDROID_LLVM_TRIPLE mips64el-none-linux-android)
set(ANDROID_HEADER_TRIPLE mips64el-linux-android)
else()
message(FATAL_ERROR "Invalid Android ABI: ${ANDROID_ABI}.")
endif()
set(ANDROID_COMPILER_FLAGS)
set(ANDROID_COMPILER_FLAGS_CXX)
set(ANDROID_COMPILER_FLAGS_DEBUG)
set(ANDROID_COMPILER_FLAGS_RELEASE)
set(ANDROID_LINKER_FLAGS)
set(ANDROID_LINKER_FLAGS_EXE)
# Don't re-export libgcc symbols in every binary.
list(APPEND ANDROID_LINKER_FLAGS -Wl,--exclude-libs,libgcc.a)
list(APPEND ANDROID_LINKER_FLAGS -Wl,--exclude-libs,libatomic.a)
# STL.
set(ANDROID_STL_STATIC_LIBRARIES)
set(ANDROID_STL_SHARED_LIBRARIES)
if(ANDROID_STL STREQUAL system)
if(NOT "x${ANDROID_CPP_FEATURES}" STREQUAL "x")
set(ANDROID_STL_STATIC_LIBRARIES supc++)
endif()
elseif(ANDROID_STL STREQUAL stlport_static)
set(ANDROID_STL_STATIC_LIBRARIES stlport_static)
elseif(ANDROID_STL STREQUAL stlport_shared)
set(ANDROID_STL_SHARED_LIBRARIES stlport_shared)
elseif(ANDROID_STL STREQUAL gnustl_static)
set(ANDROID_STL_STATIC_LIBRARIES gnustl_static)
elseif(ANDROID_STL STREQUAL gnustl_shared)
set(ANDROID_STL_STATIC_LIBRARIES supc++)
set(ANDROID_STL_SHARED_LIBRARIES gnustl_shared)
elseif(ANDROID_STL STREQUAL c++_static)
set(ANDROID_STL_STATIC_LIBRARIES c++)
elseif(ANDROID_STL STREQUAL c++_shared)
set(ANDROID_STL_SHARED_LIBRARIES c++)
elseif(ANDROID_STL STREQUAL none)
else()
message(FATAL_ERROR "Invalid Android STL: ${ANDROID_STL}.")
endif()
# Behavior of CMAKE_SYSTEM_LIBRARY_PATH and CMAKE_LIBRARY_PATH are really weird
# when CMAKE_SYSROOT is set. The library path is appended to the sysroot even if
# the library path is an abspath. Using a relative path from the sysroot doesn't
# work either, because the relative path is abspath'd relative to the current
# CMakeLists.txt file before being appended :(
#
# We can try to get out of this problem by providing another root path for cmake
# to check. CMAKE_FIND_ROOT_PATH is intended for this purpose:
# https://cmake.org/cmake/help/v3.8/variable/CMAKE_FIND_ROOT_PATH.html
#
# In theory this should just be our sysroot, but since we don't have a single
# sysroot that is correct (there's only one set of headers, but multiple
# locations for libraries that need to be handled differently). Some day we'll
# want to move all the libraries into ${ANDROID_NDK}/sysroot, but we'll need to
# make some fixes to Clang, various build systems, and possibly CMake itself to
# get that working.
list(APPEND CMAKE_FIND_ROOT_PATH "${ANDROID_NDK}")
# Sysroot.
set(CMAKE_SYSROOT "${ANDROID_NDK}/sysroot")
# CMake 3.9 tries to use CMAKE_SYSROOT_COMPILE before it gets set from
# CMAKE_SYSROOT, which leads to using the system's /usr/include. Set this
# manually.
# https://github.com/android-ndk/ndk/issues/467
set(CMAKE_SYSROOT_COMPILE "${CMAKE_SYSROOT}")
# The compiler driver doesn't check any arch specific include locations (though
# maybe we should add that). Architecture specific headers like asm/ and
# machine/ are installed to an arch-$ARCH subdirectory of the sysroot.
list(APPEND ANDROID_COMPILER_FLAGS
"-isystem ${CMAKE_SYSROOT}/usr/include/${ANDROID_HEADER_TRIPLE}")
list(APPEND ANDROID_COMPILER_FLAGS
"-D__ANDROID_API__=${ANDROID_PLATFORM_LEVEL}")
# We need different sysroots for linking and compiling, but cmake doesn't
# support that. Pass the sysroot flag manually when linking.
set(ANDROID_SYSTEM_LIBRARY_PATH
"${ANDROID_NDK}/platforms/${ANDROID_PLATFORM}/arch-${ANDROID_SYSROOT_ABI}")
list(APPEND ANDROID_LINKER_FLAGS "--sysroot ${ANDROID_SYSTEM_LIBRARY_PATH}")
# find_library searches a handful of paths as described by
# https://cmake.org/cmake/help/v3.6/command/find_library.html. Since libraries
# are per-API level and headers aren't, we don't have libraries in the
# CMAKE_SYSROOT. Set up CMAKE_SYSTEM_LIBRARY_PATH
# (https://cmake.org/cmake/help/v3.6/variable/CMAKE_SYSTEM_LIBRARY_PATH.html)
# instead.
#
# NB: The suffix is just lib here instead of dealing with lib64 because
# apparently CMake does some automatic rewriting of that? I've been testing by
# building my own CMake with a bunch of logging added, and that seems to be the
# case.
list(APPEND CMAKE_SYSTEM_LIBRARY_PATH
"${ANDROID_SYSTEM_LIBRARY_PATH}/usr/lib")
# Toolchain.
if(CMAKE_HOST_SYSTEM_NAME STREQUAL Linux)
set(ANDROID_HOST_TAG linux-x86_64)
elseif(CMAKE_HOST_SYSTEM_NAME STREQUAL Darwin)
set(ANDROID_HOST_TAG darwin-x86_64)
elseif(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows)
set(ANDROID_HOST_TAG windows-x86_64)
endif()
set(ANDROID_TOOLCHAIN_ROOT "${ANDROID_NDK}/toolchains/${ANDROID_TOOLCHAIN_ROOT}-4.9/prebuilt/${ANDROID_HOST_TAG}")
set(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows)
set(ANDROID_TOOLCHAIN_SUFFIX .exe)
endif()
set(ANDROID_HOST_PREBUILTS "${ANDROID_NDK}/prebuilt/${ANDROID_HOST_TAG}")
if(ANDROID_TOOLCHAIN STREQUAL clang)
set(ANDROID_LLVM_TOOLCHAIN_PREFIX "${ANDROID_NDK}/toolchains/llvm/prebuilt/${ANDROID_HOST_TAG}/bin/")
set(ANDROID_C_COMPILER "${ANDROID_LLVM_TOOLCHAIN_PREFIX}clang${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_CXX_COMPILER "${ANDROID_LLVM_TOOLCHAIN_PREFIX}clang++${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_ASM_COMPILER "${ANDROID_LLVM_TOOLCHAIN_PREFIX}clang${ANDROID_TOOLCHAIN_SUFFIX}")
# Clang can fail to compile if CMake doesn't correctly supply the target and
# external toolchain, but to do so, CMake needs to already know that the
# compiler is clang. Tell CMake that the compiler is really clang, but don't
# use CMakeForceCompiler, since we still want compile checks. We only want
# to skip the compiler ID detection step.
set(CMAKE_C_COMPILER_ID_RUN TRUE)
set(CMAKE_CXX_COMPILER_ID_RUN TRUE)
set(CMAKE_C_COMPILER_ID Clang)
set(CMAKE_CXX_COMPILER_ID Clang)
set(CMAKE_C_COMPILER_VERSION 3.8)
set(CMAKE_CXX_COMPILER_VERSION 3.8)
set(CMAKE_C_STANDARD_COMPUTED_DEFAULT 11)
set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT 98)
set(CMAKE_C_COMPILER_TARGET ${ANDROID_LLVM_TRIPLE})
set(CMAKE_CXX_COMPILER_TARGET ${ANDROID_LLVM_TRIPLE})
set(CMAKE_ASM_COMPILER_TARGET ${ANDROID_LLVM_TRIPLE})
set(CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN "${ANDROID_TOOLCHAIN_ROOT}")
set(CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN "${ANDROID_TOOLCHAIN_ROOT}")
set(CMAKE_ASM_COMPILER_EXTERNAL_TOOLCHAIN "${ANDROID_TOOLCHAIN_ROOT}")
set(ANDROID_AR "${ANDROID_TOOLCHAIN_PREFIX}ar${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_RANLIB "${ANDROID_TOOLCHAIN_PREFIX}ranlib${ANDROID_TOOLCHAIN_SUFFIX}")
elseif(ANDROID_TOOLCHAIN STREQUAL gcc)
set(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}g++${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_ASM_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_AR "${ANDROID_TOOLCHAIN_PREFIX}gcc-ar${ANDROID_TOOLCHAIN_SUFFIX}")
set(ANDROID_RANLIB "${ANDROID_TOOLCHAIN_PREFIX}gcc-ranlib${ANDROID_TOOLCHAIN_SUFFIX}")
else()
message(FATAL_ERROR "Invalid Android toolchain: ${ANDROID_TOOLCHAIN}.")
endif()
if(NOT IS_DIRECTORY "${ANDROID_NDK}/platforms/${ANDROID_PLATFORM}")
message(FATAL_ERROR "Invalid Android platform: ${ANDROID_PLATFORM}.")
elseif(NOT IS_DIRECTORY "${CMAKE_SYSROOT}")
message(FATAL_ERROR "Invalid Android sysroot: ${CMAKE_SYSROOT}.")
endif()
# Generic flags.
list(APPEND ANDROID_COMPILER_FLAGS
-g
-DANDROID
-ffunction-sections
-funwind-tables
-fstack-protector-strong
-no-canonical-prefixes)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,--build-id
-Wl,--warn-shared-textrel
-Wl,--fatal-warnings)
list(APPEND ANDROID_LINKER_FLAGS_EXE
-Wl,--gc-sections
-Wl,-z,nocopyreloc)
# Debug and release flags.
list(APPEND ANDROID_COMPILER_FLAGS_DEBUG -O0)
if(ANDROID_ABI MATCHES "^armeabi")
list(APPEND ANDROID_COMPILER_FLAGS_RELEASE -Os)
else()
list(APPEND ANDROID_COMPILER_FLAGS_RELEASE -O2)
endif()
list(APPEND ANDROID_COMPILER_FLAGS_RELEASE -DNDEBUG)
if(ANDROID_TOOLCHAIN STREQUAL clang)
list(APPEND ANDROID_COMPILER_FLAGS_DEBUG -fno-limit-debug-info)
endif()
# Toolchain and ABI specific flags.
if(ANDROID_ABI STREQUAL armeabi)
list(APPEND ANDROID_COMPILER_FLAGS
-march=armv5te
-mtune=xscale
-msoft-float)
endif()
if(ANDROID_ABI STREQUAL armeabi-v7a)
list(APPEND ANDROID_COMPILER_FLAGS
-march=armv7-a
-mfloat-abi=softfp
-mfpu=vfpv3-d16)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,--fix-cortex-a8)
endif()
if(ANDROID_ABI STREQUAL mips)
list(APPEND ANDROID_COMPILER_FLAGS
-mips32)
endif()
if(ANDROID_ABI STREQUAL "mips64" AND ANDROID_TOOLCHAIN STREQUAL clang)
list(APPEND ANDROID_COMPILER_FLAGS "-fintegrated-as")
endif()
if(ANDROID_ABI MATCHES "^armeabi" AND ANDROID_TOOLCHAIN STREQUAL clang)
# Disable integrated-as for better compatibility.
list(APPEND ANDROID_COMPILER_FLAGS
-fno-integrated-as)
endif()
if(ANDROID_ABI STREQUAL mips AND ANDROID_TOOLCHAIN STREQUAL clang)
# Help clang use mips64el multilib GCC
list(APPEND ANDROID_LINKER_FLAGS
"\"-L${ANDROID_TOOLCHAIN_ROOT}/lib/gcc/${ANDROID_TOOLCHAIN_NAME}/4.9.x/32/mips-r1\"")
endif()
if(ANDROID_ABI STREQUAL x86)
# http://b.android.com/222239
# http://b.android.com/220159 (internal http://b/31809417)
# x86 devices have stack alignment issues.
list(APPEND ANDROID_COMPILER_FLAGS -mstackrealign)
endif()
# STL specific flags.
if(ANDROID_STL STREQUAL system)
set(ANDROID_STL_PREFIX gnu-libstdc++/4.9)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES
"${ANDROID_NDK}/sources/cxx-stl/system/include")
elseif(ANDROID_STL MATCHES "^stlport_")
set(ANDROID_STL_PREFIX stlport)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/stlport"
"${ANDROID_NDK}/sources/cxx-stl/gabi++/include")
elseif(ANDROID_STL MATCHES "^gnustl_")
set(ANDROID_STL_PREFIX gnu-libstdc++/4.9)
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/include"
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/libs/${ANDROID_ABI}/include"
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/include/backward")
elseif(ANDROID_STL MATCHES "^c\\+\\+_")
set(ANDROID_STL_PREFIX llvm-libc++)
if(ANDROID_ABI MATCHES "^armeabi")
list(APPEND ANDROID_LINKER_FLAGS -Wl,--exclude-libs,libunwind.a)
endif()
list(APPEND ANDROID_COMPILER_FLAGS_CXX
-std=c++11)
if(ANDROID_TOOLCHAIN STREQUAL gcc)
list(APPEND ANDROID_COMPILER_FLAGS_CXX
-fno-strict-aliasing)
endif()
# Add the libc++ lib directory to the path so the linker scripts can pick up
# the extra libraries.
list(APPEND ANDROID_LINKER_FLAGS
"-L${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/libs/${ANDROID_ABI}")
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/include"
"${ANDROID_NDK}/sources/android/support/include"
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}abi/include")
endif()
set(ANDROID_CXX_STANDARD_LIBRARIES)
foreach(library ${ANDROID_STL_STATIC_LIBRARIES})
list(APPEND ANDROID_CXX_STANDARD_LIBRARIES
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/libs/${ANDROID_ABI}/lib${library}.a")
endforeach()
foreach(library ${ANDROID_STL_SHARED_LIBRARIES})
list(APPEND ANDROID_CXX_STANDARD_LIBRARIES
"${ANDROID_NDK}/sources/cxx-stl/${ANDROID_STL_PREFIX}/libs/${ANDROID_ABI}/lib${library}.so")
endforeach()
set(CMAKE_C_STANDARD_LIBRARIES_INIT "-latomic -lm")
set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "${CMAKE_C_STANDARD_LIBRARIES_INIT}")
if(ANDROID_CXX_STANDARD_LIBRARIES)
string(REPLACE ";" "\" \"" ANDROID_CXX_STANDARD_LIBRARIES "\"${ANDROID_CXX_STANDARD_LIBRARIES}\"")
set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "${CMAKE_CXX_STANDARD_LIBRARIES_INIT} ${ANDROID_CXX_STANDARD_LIBRARIES}")
endif()
# Configuration specific flags.
if(ANDROID_PIE)
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
list(APPEND ANDROID_LINKER_FLAGS_EXE
-pie
-fPIE)
endif()
if(ANDROID_CPP_FEATURES)
separate_arguments(ANDROID_CPP_FEATURES)
foreach(feature ${ANDROID_CPP_FEATURES})
if(NOT ${feature} MATCHES "^(rtti|exceptions)$")
message(FATAL_ERROR "Invalid Android C++ feature: ${feature}.")
endif()
list(APPEND ANDROID_COMPILER_FLAGS_CXX
-f${feature})
endforeach()
string(REPLACE ";" " " ANDROID_CPP_FEATURES "${ANDROID_CPP_FEATURES}")
endif()
if(NOT ANDROID_ALLOW_UNDEFINED_SYMBOLS)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,--no-undefined)
endif()
if(ANDROID_ABI MATCHES "armeabi")
if(ANDROID_ARM_MODE STREQUAL thumb)
list(APPEND ANDROID_COMPILER_FLAGS
-mthumb)
elseif(ANDROID_ARM_MODE STREQUAL arm)
list(APPEND ANDROID_COMPILER_FLAGS
-marm)
else()
message(FATAL_ERROR "Invalid Android ARM mode: ${ANDROID_ARM_MODE}.")
endif()
if(ANDROID_ABI STREQUAL armeabi-v7a AND ANDROID_ARM_NEON)
list(APPEND ANDROID_COMPILER_FLAGS
-mfpu=neon)
endif()
endif()
if(ANDROID_DISABLE_NO_EXECUTE)
list(APPEND ANDROID_COMPILER_FLAGS
-Wa,--execstack)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,-z,execstack)
else()
list(APPEND ANDROID_COMPILER_FLAGS
-Wa,--noexecstack)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,-z,noexecstack)
endif()
if(ANDROID_TOOLCHAIN STREQUAL clang)
# CMake automatically forwards all compiler flags to the linker,
# and clang doesn't like having -Wa flags being used for linking.
# To prevent CMake from doing this would require meddling with
# the CMAKE_<LANG>_COMPILE_OBJECT rules, which would get quite messy.
list(APPEND ANDROID_LINKER_FLAGS
-Qunused-arguments)
endif()
if(ANDROID_DISABLE_RELRO)
list(APPEND ANDROID_LINKER_FLAGS
-Wl,-z,norelro -Wl,-z,lazy)
else()
list(APPEND ANDROID_LINKER_FLAGS
-Wl,-z,relro -Wl,-z,now)
endif()
if(ANDROID_DISABLE_FORMAT_STRING_CHECKS)
list(APPEND ANDROID_COMPILER_FLAGS
-Wno-error=format-security)
else()
list(APPEND ANDROID_COMPILER_FLAGS
-Wformat -Werror=format-security)
endif()
# Convert these lists into strings.
string(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}")
string(REPLACE ";" " " ANDROID_COMPILER_FLAGS_CXX "${ANDROID_COMPILER_FLAGS_CXX}")
string(REPLACE ";" " " ANDROID_COMPILER_FLAGS_DEBUG "${ANDROID_COMPILER_FLAGS_DEBUG}")
string(REPLACE ";" " " ANDROID_COMPILER_FLAGS_RELEASE "${ANDROID_COMPILER_FLAGS_RELEASE}")
string(REPLACE ";" " " ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS}")
string(REPLACE ";" " " ANDROID_LINKER_FLAGS_EXE "${ANDROID_LINKER_FLAGS_EXE}")
if(ANDROID_CCACHE)
set(CMAKE_C_COMPILER_LAUNCHER "${ANDROID_CCACHE}")
set(CMAKE_CXX_COMPILER_LAUNCHER "${ANDROID_CCACHE}")
endif()
set(CMAKE_C_COMPILER "${ANDROID_C_COMPILER}")
set(CMAKE_CXX_COMPILER "${ANDROID_CXX_COMPILER}")
set(CMAKE_AR "${ANDROID_AR}" CACHE FILEPATH "Archiver")
set(CMAKE_RANLIB "${ANDROID_RANLIB}" CACHE FILEPATH "Ranlib")
set(_CMAKE_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_PREFIX}")
if(ANDROID_ABI STREQUAL "x86" OR ANDROID_ABI STREQUAL "x86_64")
set(CMAKE_ASM_NASM_COMPILER
"${ANDROID_HOST_PREBUILTS}/bin/yasm${ANDROID_TOOLCHAIN_SUFFIX}")
set(CMAKE_ASM_NASM_COMPILER_ARG1 "-DELF")
endif()
# Set or retrieve the cached flags.
# This is necessary in case the user sets/changes flags in subsequent
# configures. If we included the Android flags in here, they would get
# overwritten.
set(CMAKE_C_FLAGS ""
CACHE STRING "Flags used by the compiler during all build types.")
set(CMAKE_CXX_FLAGS ""
CACHE STRING "Flags used by the compiler during all build types.")
set(CMAKE_ASM_FLAGS ""
CACHE STRING "Flags used by the compiler during all build types.")
set(CMAKE_C_FLAGS_DEBUG ""
CACHE STRING "Flags used by the compiler during debug builds.")
set(CMAKE_CXX_FLAGS_DEBUG ""
CACHE STRING "Flags used by the compiler during debug builds.")
set(CMAKE_ASM_FLAGS_DEBUG ""
CACHE STRING "Flags used by the compiler during debug builds.")
set(CMAKE_C_FLAGS_RELEASE ""
CACHE STRING "Flags used by the compiler during release builds.")
set(CMAKE_CXX_FLAGS_RELEASE ""
CACHE STRING "Flags used by the compiler during release builds.")
set(CMAKE_ASM_FLAGS_RELEASE ""
CACHE STRING "Flags used by the compiler during release builds.")
set(CMAKE_MODULE_LINKER_FLAGS ""
CACHE STRING "Flags used by the linker during the creation of modules.")
set(CMAKE_SHARED_LINKER_FLAGS ""
CACHE STRING "Flags used by the linker during the creation of dll's.")
set(CMAKE_EXE_LINKER_FLAGS ""
CACHE STRING "Flags used by the linker.")
set(CMAKE_C_FLAGS "${ANDROID_COMPILER_FLAGS} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${ANDROID_COMPILER_FLAGS} ${ANDROID_COMPILER_FLAGS_CXX} ${CMAKE_CXX_FLAGS}")
set(CMAKE_ASM_FLAGS "${ANDROID_COMPILER_FLAGS} ${CMAKE_ASM_FLAGS}")
set(CMAKE_C_FLAGS_DEBUG "${ANDROID_COMPILER_FLAGS_DEBUG} ${CMAKE_C_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_DEBUG "${ANDROID_COMPILER_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_DEBUG}")
set(CMAKE_ASM_FLAGS_DEBUG "${ANDROID_COMPILER_FLAGS_DEBUG} ${CMAKE_ASM_FLAGS_DEBUG}")
set(CMAKE_C_FLAGS_RELEASE "${ANDROID_COMPILER_FLAGS_RELEASE} ${CMAKE_C_FLAGS_RELEASE}")
set(CMAKE_CXX_FLAGS_RELEASE "${ANDROID_COMPILER_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_RELEASE}")
set(CMAKE_ASM_FLAGS_RELEASE "${ANDROID_COMPILER_FLAGS_RELEASE} ${CMAKE_ASM_FLAGS_RELEASE}")
set(CMAKE_SHARED_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_MODULE_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${ANDROID_LINKER_FLAGS_EXE} ${CMAKE_EXE_LINKER_FLAGS}")
# Compatibility for read-only variables.
# Read-only variables for compatibility with the other toolchain file.
# We'll keep these around for the existing projects that still use them.
# TODO: All of the variables here have equivalents in our standard set of
# configurable variables, so we can remove these once most of our users migrate
# to those variables.
set(ANDROID_NATIVE_API_LEVEL ${ANDROID_PLATFORM_LEVEL})
if(ANDROID_ALLOW_UNDEFINED_SYMBOLS)
set(ANDROID_SO_UNDEFINED TRUE)
else()
set(ANDROID_NO_UNDEFINED TRUE)
endif()
set(ANDROID_FUNCTION_LEVEL_LINKING TRUE)
set(ANDROID_GOLD_LINKER TRUE)
if(NOT ANDROID_DISABLE_NO_EXECUTE)
set(ANDROID_NOEXECSTACK TRUE)
endif()
if(NOT ANDROID_DISABLE_RELRO)
set(ANDROID_RELRO TRUE)
endif()
if(ANDROID_ARM_MODE STREQUAL arm)
set(ANDROID_FORCE_ARM_BUILD TRUE)
endif()
if(ANDROID_CPP_FEATURES MATCHES "rtti"
AND ANDROID_CPP_FEATURES MATCHES "exceptions")
set(ANDROID_STL_FORCE_FEATURES TRUE)
endif()
if(ANDROID_CCACHE)
set(NDK_CCACHE "${ANDROID_CCACHE}")
endif()
if(ANDROID_TOOLCHAIN STREQUAL clang)
set(ANDROID_TOOLCHAIN_NAME ${ANDROID_TOOLCHAIN_NAME}-clang)
else()
set(ANDROID_TOOLCHAIN_NAME ${ANDROID_TOOLCHAIN_NAME}-4.9)
endif()
set(ANDROID_NDK_HOST_X64 TRUE)
set(ANDROID_NDK_LAYOUT RELEASE)
if(ANDROID_ABI STREQUAL armeabi)
set(ARMEABI TRUE)
elseif(ANDROID_ABI STREQUAL armeabi-v7a)
set(ARMEABI_V7A TRUE)
if(ANDROID_ARM_NEON)
set(NEON TRUE)
endif()
elseif(ANDROID_ABI STREQUAL arm64-v8a)
set(ARM64_V8A TRUE)
elseif(ANDROID_ABI STREQUAL x86)
set(X86 TRUE)
elseif(ANDROID_ABI STREQUAL x86_64)
set(X86_64 TRUE)
elseif(ANDROID_ABI STREQUAL mips)
set(MIPS TRUE)
elseif(ANDROID_ABI STREQUAL mips64)
set(MIPS64 TRUE)
endif()
set(ANDROID_NDK_HOST_SYSTEM_NAME ${ANDROID_HOST_TAG})
set(ANDROID_NDK_ABI_NAME ${ANDROID_ABI})
set(ANDROID_NDK_RELEASE r${ANDROID_NDK_REVISION})
set(ANDROID_ARCH_NAME ${ANDROID_SYSROOT_ABI})
set(ANDROID_SYSROOT "${CMAKE_SYSROOT}")
set(TOOL_OS_SUFFIX ${ANDROID_TOOLCHAIN_SUFFIX})
if(ANDROID_TOOLCHAIN STREQUAL clang)
set(ANDROID_COMPILER_IS_CLANG TRUE)
endif()
# CMake 3.7+ compatibility.
if (CMAKE_VERSION VERSION_GREATER 3.7.0)
set(CMAKE_ANDROID_NDK ${ANDROID_NDK})
if(ANDROID_TOOLCHAIN STREQUAL gcc)
set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION 4.9)
else()
set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION clang)
endif()
set(CMAKE_ANDROID_STL_TYPE ${ANDROID_STL})
if(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
set(CMAKE_ANDROID_ARM_NEON ${ANDROID_ARM_NEON})
set(CMAKE_ANDROID_ARM_MODE ${ANDROID_ARM_MODE})
endif()
endif()
#!/bin/bash
build_for_linux() {
echo "linux"
}
build_for_mac() {
if [ ! `which brew` ]; then
echo "building failed! homebrew not found, please install homebrew."
return
fi
if [ ! `which cmake` ]; then
echo "installing cmake."
brew install cmake
if [ $? -ne 0 ]; then
echo "cmake install failed."
return
fi
fi
PLATFORM="x86"
MODE="Release"
CXX_FLAGS="-std=c++11 -O3 -s"
BUILD_DIR=build/release/"${PLATFORM}"
mkdir -p ${BUILD_DIR}/build
mkdir -p ${BUILD_DIR}/test
cp -r test/models ${BUILD_DIR}/test/models
cmake . \
-B"${BUILD_DIR}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DIS_MAC=true
cd ${BUILD_DIR}
make -j 8
}
build_for_android() {
if [ -z "${NDK_ROOT}" ]; then
echo "NDK_ROOT not found!"
exit -1
fi
# PLATFORM="arm-v7a"
PLATFORM="arm-v8a"
if [ "${PLATFORM}" = "arm-v7a" ]; then
ABI="armeabi-v7a with NEON"
ARM_PLATFORM="V7"
CXX_FLAGS="-O3 -std=c++11 -s"
elif [ "${PLATFORM}" = "arm-v8a" ]; then
ABI="arm64-v8a"
ARM_PLATFORM="V8"
CXX_FLAGS="-O3 -std=c++11 -s"
else
echo "unknown platform!"
exit -1
fi
MODE="Release"
ANDROID_PLATFORM_VERSION="android-15"
TOOLCHAIN_FILE="./android-cmake/android.toolchain.cmake"
ANDROID_ARM_MODE="arm"
cmake . \
-B"build/release/${PLATFORM}" \
-DANDROID_ABI="${ABI}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DANDROID_PLATFORM="${ANDROID_PLATFORM_VERSION}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DANDROID_STL=c++_static \
-DANDROID=true \
-D"${ARM_PLATFORM}"=true
cd "./build/release/${PLATFORM}"
make -j 8
}
build_for_ios() {
PLATFORM="ios"
MODE="Release"
BUILD_DIR=build/release/"${PLATFORM}"
TOOLCHAIN_FILE="./ios-cmake/ios.toolchain.cmake"
C_FLAGS="-fobjc-abi-version=2 -fobjc-arc -isysroot ${CMAKE_OSX_SYSROOT}"
CXX_FLAGS="-fobjc-abi-version=2 -fobjc-arc -std=gnu++11 -stdlib=libc++ -isysroot ${CMAKE_OSX_SYSROOT}"
mkdir -p "${BUILD_DIR}"
cmake . \
-B"${BUILD_DIR}" \
-DCMAKE_BUILD_TYPE="${MODE}" \
-DCMAKE_TOOLCHAIN_FILE="${TOOLCHAIN_FILE}" \
-DIOS_PLATFORM=OS \
-DCMAKE_C_FLAGS="${C_FLAGS}" \
-DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
-DIS_IOS="true" \
cd "${BUILD_DIR}"
make -j 8
}
build_error() {
echo "unknown argument"
}
if [ $# -lt 1 ]; then
echo "error: target missing!"
echo "available targets: mac|linux|ios|android"
echo "sample usage: ./build.sh mac"
else
if [ $1 = "mac" ]; then
build_for_mac
elif [ $1 = "linux" ]; then
build_for_linux
elif [ $1 = "android" ]; then
build_for_android
elif [ $1 = "ios" ]; then
build_for_ios
else
build_error
fi
fi
# This file is part of the ios-cmake project. It was retrieved from
# https://github.com/cristeab/ios-cmake.git, which is a fork of
# https://code.google.com/p/ios-cmake/. Which in turn is based off of
# the Platform/Darwin.cmake and Platform/UnixPaths.cmake files which
# are included with CMake 2.8.4
#
# The ios-cmake project is licensed under the new BSD license.
#
# Copyright (c) 2014, Bogdan Cristea and LTE Engineering Software,
# Kitware, Inc., Insight Software Consortium. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is based off of the Platform/Darwin.cmake and
# Platform/UnixPaths.cmake files which are included with CMake 2.8.4
# It has been altered for iOS development.
#
# Updated by Alex Stewart (alexs.mac@gmail.com)
#
# *****************************************************************************
# Now maintained by Alexander Widerberg (widerbergaren [at] gmail.com)
# under the BSD-Clause-3 licence
# *****************************************************************************
#
# INFORMATION / HELP
#
# The following variables control the behaviour of this toolchain:
#
# IOS_PLATFORM: OS (default) or SIMULATOR or SIMULATOR64 or TVOS or SIMULATOR_TVOS
# OS = Build for iPhoneOS.
# SIMULATOR = Build for x86 i386 iPhone Simulator.
# SIMULATOR64 = Build for x86_64 iPhone Simulator.
# TVOS = Build for AppleTVOS.
# SIMULATOR_TVOS = Build for x86_64 AppleTV Simulator.
# CMAKE_OSX_SYSROOT: Path to the iOS SDK to use. By default this is
# automatically determined from IOS_PLATFORM and xcodebuild, but
# can also be manually specified (although this should not be required).
# CMAKE_IOS_DEVELOPER_ROOT: Path to the Developer directory for the iOS platform
# being compiled for. By default this is automatically determined from
# CMAKE_OSX_SYSROOT, but can also be manually specified (although this should
# not be required).
# ENABLE_BITCODE: (1|0) Enables or disables bitcode support. Default 1 (true)
# ENABLE_ARC: (1|0) Enables or disables ARC support. Default 1 (true, ARC enabled by default)
# IOS_ARCH: (armv7 armv7s arm64 i386 x86_64) If specified, will override the default architectures for the given IOS_PLATFORM
# OS = armv7 armv7s arm64
# SIMULATOR = i386
# SIMULATOR64 = x86_64
# TVOS = arm64
# SIMULATOR_TVOS = x86_64
#
# This toolchain defines the following variables for use externally:
#
# XCODE_VERSION: Version number (not including Build version) of Xcode detected.
# IOS_SDK_VERSION: Version of iOS SDK being used.
# CMAKE_OSX_ARCHITECTURES: Architectures being compiled for (generated from
# IOS_PLATFORM).
#
# This toolchain defines the following macros for use externally:
#
# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE XCODE_VARIANT)
# A convenience macro for setting xcode specific properties on targets.
# Available variants are: All, Release, RelWithDebInfo, Debug, MinSizeRel
# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1" "all").
#
# find_host_package (PROGRAM ARGS)
# A macro used to find executable programs on the host system, not within the
# iOS environment. Thanks to the android-cmake project for providing the
# command.
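# For example, configuring an iPhoneOS device build with this toolchain can be
# sketched as follows (values are illustrative):
#   cmake -DCMAKE_TOOLCHAIN_FILE=ios-cmake/ios.toolchain.cmake \
#         -DIOS_PLATFORM=OS -DIOS_DEPLOYMENT_TARGET=8.0 ..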
# Fix for PThread library not in path
set(CMAKE_THREAD_LIBS_INIT "-lpthread")
set(CMAKE_HAVE_THREADS_LIBRARY 1)
set(CMAKE_USE_WIN32_THREADS_INIT 0)
set(CMAKE_USE_PTHREADS_INIT 1)
# Get the Xcode version being used.
execute_process(COMMAND xcodebuild -version
OUTPUT_VARIABLE XCODE_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}")
string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}")
message(STATUS "Building with Xcode version: ${XCODE_VERSION}")
# Default to building for iPhoneOS if not specified otherwise, and we cannot
# determine the platform from the CMAKE_OSX_ARCHITECTURES variable. The use
# of CMAKE_OSX_ARCHITECTURES is such that try_compile() projects can correctly
# determine the value of IOS_PLATFORM from the root project, as
# CMAKE_OSX_ARCHITECTURES is propagated to them by CMake.
if (NOT DEFINED IOS_PLATFORM)
if (CMAKE_OSX_ARCHITECTURES)
if (CMAKE_OSX_ARCHITECTURES MATCHES ".*arm.*")
set(IOS_PLATFORM "OS")
elseif (CMAKE_OSX_ARCHITECTURES MATCHES "i386")
set(IOS_PLATFORM "SIMULATOR")
elseif (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
set(IOS_PLATFORM "SIMULATOR64")
endif()
endif()
if (NOT IOS_PLATFORM)
set(IOS_PLATFORM "OS")
endif()
endif()
set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING
"Type of iOS platform for which to build.")
# Determine the platform name and architectures for use in xcodebuild commands
# from the specified IOS_PLATFORM name.
if (IOS_PLATFORM STREQUAL "OS")
set(XCODE_IOS_PLATFORM iphoneos)
if(NOT IOS_ARCH)
set(IOS_ARCH armv7 armv7s arm64)
endif()
elseif (IOS_PLATFORM STREQUAL "SIMULATOR")
set(XCODE_IOS_PLATFORM iphonesimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH i386)
endif()
elseif(IOS_PLATFORM STREQUAL "SIMULATOR64")
set(XCODE_IOS_PLATFORM iphonesimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH x86_64)
endif()
elseif (IOS_PLATFORM STREQUAL "TVOS")
set(XCODE_IOS_PLATFORM appletvos)
if(NOT IOS_ARCH)
set(IOS_ARCH arm64)
endif()
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS")
set(XCODE_IOS_PLATFORM appletvsimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH x86_64)
endif()
else()
message(FATAL_ERROR "Invalid IOS_PLATFORM: ${IOS_PLATFORM}")
endif()
message(STATUS "Configuring iOS build for platform: ${IOS_PLATFORM}, "
"architecture(s): ${IOS_ARCH}")
# If user did not specify the SDK root to use, then query xcodebuild for it.
if (NOT CMAKE_OSX_SYSROOT)
execute_process(COMMAND xcodebuild -version -sdk ${XCODE_IOS_PLATFORM} Path
OUTPUT_VARIABLE CMAKE_OSX_SYSROOT
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT} for platform: ${IOS_PLATFORM}")
endif()
if (NOT EXISTS ${CMAKE_OSX_SYSROOT})
message(FATAL_ERROR "Invalid CMAKE_OSX_SYSROOT: ${CMAKE_OSX_SYSROOT} "
"does not exist.")
endif()
# Specify minimum version of deployment target.
if (NOT DEFINED IOS_DEPLOYMENT_TARGET)
# Unless specified, SDK version 8.0 is used by default as minimum target version.
set(IOS_DEPLOYMENT_TARGET "8.0"
CACHE STRING "Minimum iOS version to build for." )
message(STATUS "Using the default min-version since IOS_DEPLOYMENT_TARGET not provided!")
endif()
# Use bitcode or not
if (NOT DEFINED ENABLE_BITCODE)
# Unless specified, enable bitcode support by default
set(ENABLE_BITCODE TRUE CACHE BOOL "Whether or not to enable bitcode")
message(STATUS "Enabling bitcode support by default. ENABLE_BITCODE not provided!")
endif()
# Use ARC or not
if (NOT DEFINED ENABLE_ARC)
# Unless specified, enable ARC support by default
set(ENABLE_ARC TRUE CACHE BOOL "Whether or not to enable ARC")
message(STATUS "Enabling ARC support by default. ENABLE_ARC not provided!")
endif()
# Get the SDK version information.
execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion
OUTPUT_VARIABLE IOS_SDK_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Find the Developer root for the specific iOS platform being compiled for
# from CMAKE_OSX_SYSROOT. Should be ../../ from SDK specified in
# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain
# this information from xcrun or xcodebuild.
if (NOT CMAKE_IOS_DEVELOPER_ROOT)
get_filename_component(IOS_PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT} PATH)
get_filename_component(CMAKE_IOS_DEVELOPER_ROOT ${IOS_PLATFORM_SDK_DIR} PATH)
endif()
if (NOT EXISTS ${CMAKE_IOS_DEVELOPER_ROOT})
message(FATAL_ERROR "Invalid CMAKE_IOS_DEVELOPER_ROOT: "
"${CMAKE_IOS_DEVELOPER_ROOT} does not exist.")
endif()
# Find the C & C++ compilers for the specified SDK.
if (NOT CMAKE_C_COMPILER)
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang
OUTPUT_VARIABLE CMAKE_C_COMPILER
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using C compiler: ${CMAKE_C_COMPILER}")
endif()
if (NOT CMAKE_CXX_COMPILER)
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++
OUTPUT_VARIABLE CMAKE_CXX_COMPILER
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using CXX compiler: ${CMAKE_CXX_COMPILER}")
endif()
# Find (Apple's) libtool.
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find libtool
OUTPUT_VARIABLE IOS_LIBTOOL
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using libtool: ${IOS_LIBTOOL}")
# Configure libtool to be used instead of ar + ranlib to build static libraries.
# This is required on Xcode 7+, but should also work on previous versions of
# Xcode.
set(CMAKE_C_CREATE_STATIC_LIBRARY
"${IOS_LIBTOOL} -static -o <TARGET> <LINK_FLAGS> <OBJECTS> ")
set(CMAKE_CXX_CREATE_STATIC_LIBRARY
"${IOS_LIBTOOL} -static -o <TARGET> <LINK_FLAGS> <OBJECTS> ")
# Get the version of Darwin (OS X) of the host.
execute_process(COMMAND uname -r
OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Standard settings.
set(CMAKE_SYSTEM_NAME Darwin CACHE INTERNAL "")
set(CMAKE_SYSTEM_VERSION ${IOS_SDK_VERSION} CACHE INTERNAL "")
set(UNIX TRUE CACHE BOOL "")
set(APPLE TRUE CACHE BOOL "")
set(IOS TRUE CACHE BOOL "")
set(CMAKE_AR ar CACHE FILEPATH "" FORCE)
set(CMAKE_RANLIB ranlib CACHE FILEPATH "" FORCE)
# Force unset of OS X-specific deployment target (otherwise autopopulated),
# required as of cmake 2.8.10.
set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING
"Must be empty for iOS builds." FORCE)
# Set the architectures for which to build.
set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS")
# Skip the platform compiler checks for cross compiling.
set(CMAKE_CXX_COMPILER_FORCED TRUE)
set(CMAKE_CXX_COMPILER_WORKS TRUE)
set(CMAKE_C_COMPILER_FORCED TRUE)
set(CMAKE_C_COMPILER_WORKS TRUE)
# All iOS/Darwin specific settings - some may be redundant.
set(CMAKE_SHARED_LIBRARY_PREFIX "lib")
set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib")
set(CMAKE_SHARED_MODULE_PREFIX "lib")
set(CMAKE_SHARED_MODULE_SUFFIX ".so")
set(CMAKE_MODULE_EXISTS 1)
set(CMAKE_DL_LIBS "")
set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
message(STATUS "Building for minimum iOS version: ${IOS_DEPLOYMENT_TARGET}"
" (SDK version: ${IOS_SDK_VERSION})")
# Note that only Xcode 7+ supports the newer more specific:
# -m${XCODE_IOS_PLATFORM}-version-min flags, older versions of Xcode use:
# -m(ios/ios-simulator)-version-min instead.
if (IOS_PLATFORM STREQUAL "OS")
if (XCODE_VERSION VERSION_LESS 7.0)
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mios-version-min=${IOS_DEPLOYMENT_TARGET}")
else()
# Xcode 7.0+ uses flags we can build directly from XCODE_IOS_PLATFORM.
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
elseif (IOS_PLATFORM STREQUAL "TVOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mtvos-version-min=${IOS_DEPLOYMENT_TARGET}")
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mtvos-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
else()
# SIMULATOR or SIMULATOR64 both use -mios-simulator-version-min.
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mios-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
message(STATUS "Version flags set to: ${XCODE_IOS_PLATFORM_VERSION_FLAGS}")
if (ENABLE_BITCODE)
set(BITCODE "-fembed-bitcode")
message(STATUS "Enabling bitcode support.")
else()
set(BITCODE "")
message(STATUS "Disabling bitcode support.")
endif()
if (ENABLE_ARC)
set(FOBJC_ARC "-fobjc-arc")
message(STATUS "Enabling ARC support.")
else()
set(FOBJC_ARC "-fno-objc-arc")
message(STATUS "Disabling ARC support.")
endif()
set(CMAKE_C_FLAGS
"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} -fobjc-abi-version=2 ${FOBJC_ARC} ${C_FLAGS}")
# Hidden visibility is required for C++ on iOS.
set(CMAKE_CXX_FLAGS
"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} -fvisibility=hidden -fvisibility-inlines-hidden -fobjc-abi-version=2 ${FOBJC_ARC} ${CXX_FLAGS}")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS} -DNDEBUG -Os -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_MINSIZEREL}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -DNDEBUG -O2 -g -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_RELWITHDEBINFO}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -DNDEBUG -O3 -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_RELEASE}")
set(CMAKE_C_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${C_LINK_FLAGS}")
set(CMAKE_CXX_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CXX_LINK_FLAGS}")
# In order to ensure that the updated compiler flags are used in try_compile()
# tests, we have to forcibly set them in the CMake cache, not merely set them
# in the local scope.
list(APPEND VARS_TO_FORCE_IN_CACHE
CMAKE_C_FLAGS
CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_C_LINK_FLAGS
CMAKE_CXX_LINK_FLAGS)
foreach(VAR_TO_FORCE ${VARS_TO_FORCE_IN_CACHE})
set(${VAR_TO_FORCE} "${${VAR_TO_FORCE}}" CACHE STRING "" FORCE)
endforeach()
set(CMAKE_PLATFORM_HAS_INSTALLNAME 1)
set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names")
set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names")
set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a")
# Hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old
# build tree (where install_name_tool was hardcoded) and where
# CMAKE_INSTALL_NAME_TOOL isn't in the cache and still cmake didn't fail in
# CMakeFindBinUtils.cmake (because it isn't rerun) hardcode
# CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did
# before, Alex.
if (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
endif (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
# Set the find root to the iOS developer roots and to user defined paths.
set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_OSX_SYSROOT}
${CMAKE_PREFIX_PATH} CACHE string "iOS find search path root" FORCE)
# Default to searching for frameworks first.
set(CMAKE_FIND_FRAMEWORK FIRST)
# Set up the default search directories for frameworks.
set(CMAKE_SYSTEM_FRAMEWORK_PATH
${CMAKE_OSX_SYSROOT}/System/Library/Frameworks
${CMAKE_OSX_SYSROOT}/System/Library/PrivateFrameworks
${CMAKE_OSX_SYSROOT}/Developer/Library/Frameworks)
# Only search the specified iOS SDK, not the remainder of the host filesystem.
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
# This little macro lets you set any Xcode specific property.
macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE XCODE_RELVERSION)
set(XCODE_RELVERSION_I "${XCODE_RELVERSION}")
if (XCODE_RELVERSION_I STREQUAL "All")
set_property(TARGET ${TARGET} PROPERTY
XCODE_ATTRIBUTE_${XCODE_PROPERTY} "${XCODE_VALUE}")
else()
set_property(TARGET ${TARGET} PROPERTY
XCODE_ATTRIBUTE_${XCODE_PROPERTY}[variant=${XCODE_RELVERSION_I}] "${XCODE_VALUE}")
endif()
endmacro(set_xcode_property)
# This macro lets you find executable programs on the host system.
macro(find_host_package)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
set(IOS FALSE)
find_package(${ARGN})
set(IOS TRUE)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
endmacro(find_host_package)
## Paddle-Mobile
This folder is used to develop the Metal version for iOS GPU.
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "framework/attribute.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class OperatorBase;
class OpDesc;
class BlockDesc;
class InferShapeContext;
}  // namespace framework
using VariableNameMap = std::map<std::string, std::vector<std::string>>;
template <typename Dtype>
using OpCreator = std::function<framework::OperatorBase<Dtype>*(
const std::string& /*type*/, const VariableNameMap& /*inputs*/,
const VariableNameMap& /*outputs*/,
const framework::AttributeMap& /*attrs*/)>;
using GradOpMakerFN =
std::function<std::vector<std::unique_ptr<framework::OpDesc>>(
const framework::OpDesc&,
const std::unordered_set<std::string>& /*no_grad_set*/,
std::unordered_map<std::string, std::string>* /*grad_to_var*/,
const std::vector<framework::BlockDesc*>& grad_block)>;
using InferVarTypeFN = std::function<void(const framework::OpDesc& /*op_desc*/,
framework::BlockDesc* /*block*/)>;
using InferShapeFN = std::function<void(framework::InferShapeContext*)>;
}  // namespace paddle_mobile
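// Usage sketch: an OpCreator is a factory functor for a concrete operator
// type. ConvOp below is hypothetical, and CPU is the DeviceType<kCPU> alias
// from the types header:
//   OpCreator<CPU> creator = [](const std::string &type,
//                               const VariableNameMap &inputs,
//                               const VariableNameMap &outputs,
//                               const framework::AttributeMap &attrs) {
//     return new ConvOp<CPU>(type, inputs, outputs, attrs);  // hypothetical
//   };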
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
namespace paddle_mobile {
enum class Precision : int { FP32 = 0 };
//! device type
enum DeviceTypeEnum { kINVALID = -1, kCPU = 0, kFPGA = 1, kGPU_MALI = 2 };
template <DeviceTypeEnum T>
struct DeviceType {};
typedef DeviceType<kCPU> CPU;
typedef DeviceType<kFPGA> FPGA;
typedef DeviceType<kGPU_MALI> GPU_MALI;
//! data type
enum DataType {
PM_INVALID = -1,
PM_HALF = 0,
PM_FLOAT = 1,
PM_DOUBLE = 2,
PM_INT8 = 3,
PM_INT16 = 4,
PM_INT32 = 5,
PM_INT64 = 6,
PM_UINT8 = 7,
PM_UINT16 = 8,
PM_UINT32 = 9,
PM_STRING = 10,
PM_BOOL = 11,
PM_SHAPE = 12,
PM_TENSOR = 13
};
//! status type
enum PMStatus {
PMSuccess = 0xFF, /*!< No errors */
PMNotInitialized = 0x01, /*!< Data not initialized. */
PMInvalidValue = 0x02, /*!< Incorrect variable value. */
PMMemAllocFailed = 0x03, /*!< Memory allocation error. */
PMUnKownError = 0x04, /*!< Unknown error. */
  PMOutOfAuthority = 0x05, /*!< Attempt to modify data not owned. */
  PMOutOfMem = 0x06,       /*!< Out-of-memory error. */
  PMUnImplError = 0x07,    /*!< Unimplemented error. */
  PMWrongDevice = 0x08     /*!< Incorrect device. */
};
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "variant.h"
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <cstring>
#include <iostream>
#include <typeinfo>
#include <utility>
namespace paddle_mobile {
template <int ID, typename Type>
struct IDToType {
typedef Type type_t;
};
template <typename F, typename... Ts>
struct VariantHelper {
static const size_t size = sizeof(F) > VariantHelper<Ts...>::size
? sizeof(F)
: VariantHelper<Ts...>::size;
inline static void Destroy(size_t id, void *data) {
if (id == typeid(F).hash_code()) {
reinterpret_cast<F *>(data)->~F();
} else {
VariantHelper<Ts...>::Destroy(id, data);
}
}
};
template <typename F>
struct VariantHelper<F> {
static const size_t size = sizeof(F);
inline static void Destroy(size_t id, void *data) {
if (id == typeid(F).hash_code()) {
// reinterpret_cast<F*>(data)->~F();
} else {
// std::cout << "未匹配到 " << std::endl;
}
}
};
template <size_t size>
class RawData {
public:
char data[size];
RawData() {}
  RawData(const RawData &raw_data) { memcpy(data, raw_data.data, size); }
  // void operator=(const RawData &raw_data) {
  //   memcpy(data, raw_data.data, size);
  // }
};
template <typename... Ts>
struct Variant {
Variant(const Variant &variant) {
// std::cout << " 赋值构造函数 " << std::endl;
type_id = variant.type_id;
data = variant.data;
}
Variant() : type_id(invalid_type()) {}
~Variant() {
// helper::Destroy(type_id, &data);
}
template <typename T, typename... Args>
void Set(Args &&... args) {
helper::Destroy(type_id, &data);
new (&data) T(std::forward<Args>(args)...);
type_id = typeid(T).hash_code();
}
template <typename T>
T &Get() const {
if (type_id == typeid(T).hash_code()) {
return *const_cast<T *>(reinterpret_cast<const T *>(&data));
} else {
// std::cout << " bad cast in variant " << std::endl;
throw std::bad_cast();
}
}
size_t TypeId() const { return type_id; }
private:
static inline size_t invalid_type() { return typeid(void).hash_code(); }
typedef VariantHelper<Ts...> helper;
size_t type_id;
RawData<helper::size> data;
};
template <typename T>
struct Vistor {
typedef T type_t;
};
} // namespace paddle_mobile
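/* Usage sketch for the Variant above — a minimal, illustrative example, not
   part of the framework sources; the function name and include path are
   assumptions. Variant stores one of the listed types in its raw buffer;
   Get<T>() matches the stored type by typeid hash and throws std::bad_cast
   on a mismatch (no conversions are performed). */
#include <cassert>
#include <typeinfo>
#include "common/variant.h"

inline void variant_usage_sketch() {
  paddle_mobile::Variant<int, float> v;
  v.Set<int>(42);              // constructs an int in the raw storage
  assert(v.Get<int>() == 42);  // read back by exact type
  v.Set<float>(1.5f);          // replaces the stored value and type id
  assert(v.TypeId() == typeid(float).hash_code());
  // v.Get<int>() here would throw std::bad_cast.
}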
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "attribute.h"
namespace paddle_mobile {
namespace framework {}
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/variant.h"
#include "framework.pb.h"
namespace paddle_mobile {
namespace framework {
class BlockDesc;
class Attribute {
public:
static Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc) {
// std::cout << "begin get attr value" << std::endl;
Attribute attr;
switch (attr_desc.type()) {
case proto::AttrType::BOOLEAN: {
attr.Set<bool>(attr_desc.b());
break;
}
case proto::AttrType::INT: {
attr.Set<int>(attr_desc.i());
break;
}
case proto::AttrType::FLOAT: {
attr.Set<float>(attr_desc.f());
break;
}
case proto::AttrType::STRING: {
attr.Set<std::string>(attr_desc.s());
break;
}
case proto::AttrType::BOOLEANS: {
std::vector<bool> val(attr_desc.bools_size());
for (int i = 0; i < attr_desc.bools_size(); ++i) {
val[i] = attr_desc.bools(i);
}
attr.Set<std::vector<bool>>(val);
break;
}
case proto::AttrType::INTS: {
std::vector<int> val(attr_desc.ints_size());
for (int i = 0; i < attr_desc.ints_size(); ++i) {
val[i] = attr_desc.ints(i);
}
attr.Set<std::vector<int>>(val);
break;
}
case proto::AttrType::FLOATS: {
std::vector<float> val(attr_desc.floats_size());
for (int i = 0; i < attr_desc.floats_size(); ++i) {
val[i] = attr_desc.floats(i);
}
attr.Set<std::vector<float>>(val);
break;
}
case proto::AttrType::STRINGS: {
std::vector<std::string> val(attr_desc.strings_size());
for (int i = 0; i < attr_desc.strings_size(); ++i) {
val[i] = attr_desc.strings(i);
}
attr.Set<std::vector<std::string>>(val);
break;
}
case proto::AttrType::LONG: {
attr.Set<int64_t>(attr_desc.l());
break;
}
      default:
        // std::cout << " not supported " << std::endl;
        break;
}
// std::cout << "end get attr value" << std::endl;
return attr;
}
Attribute() {}
template <typename T, typename... Args>
Attribute& Set(Args&&... args) {
variant_.Set<T>(args...);
return *this;
}
template <typename T>
T& Get() const {
return variant_.Get<T>();
}
private:
Variant<int, float, std::string, std::vector<int>, std::vector<float>,
std::vector<std::string>, bool, std::vector<bool>, BlockDesc*,
int64_t>
variant_;
};
using AttributeMap = std::unordered_map<std::string, Attribute>;
class AttrReader {
public:
explicit AttrReader(const AttributeMap& attrs) : attrs_(attrs) {}
template <typename T>
inline T Get(const std::string& name) const {
// PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in
// AttributeMap",
// name);
    return attrs_.at(name).Get<T>();
}
private:
const AttributeMap& attrs_;
};
} // namespace framework
} // namespace paddle_mobile
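/* Usage sketch for Attribute/AttrReader above — illustrative only; the
   function name and include path are assumptions. Attributes are typed via
   the underlying Variant, and AttrReader pulls them back out of an
   AttributeMap by name and exact type. */
#include <cassert>
#include <string>
#include <vector>
#include "framework/attribute.h"

inline void attribute_usage_sketch() {
  using namespace paddle_mobile::framework;
  AttributeMap attrs;
  attrs["groups"].Set<int>(1);
  attrs["strides"].Set<std::vector<int>>(std::vector<int>{2, 2});
  AttrReader reader(attrs);
  assert(reader.Get<int>("groups") == 1);
  assert(reader.Get<std::vector<int>>("strides")[0] == 2);
}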
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "block_desc.h"
namespace paddle_mobile {
namespace framework {
std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const {
std::vector<std::shared_ptr<VarDesc>> res;
for (const auto &p : vars_) {
res.push_back(p.second);
}
return res;
}
std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const {
std::vector<std::shared_ptr<OpDesc>> res;
for (const auto &op : ops_) {
res.push_back(op);
}
return res;
}
BlockDesc::BlockDesc(const proto::BlockDesc &desc) : desc_(desc) {
for (const proto::VarDesc &var_desc : desc_.vars()) {
vars_[var_desc.name()].reset(new VarDesc(var_desc));
}
for (const proto::OpDesc &op_desc : desc_.ops()) {
ops_.emplace_back(new framework::OpDesc(op_desc));
}
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework.pb.h"
#include "op_desc.h"
#include "paddle_mobile_object.h"
#include "var_desc.h"
namespace paddle_mobile {
namespace framework {
class BlockDesc : PaddleMobileObject {
public:
BlockDesc(const proto::BlockDesc &desc);
  int ID() const { return desc_.idx(); }
  int Parent() const { return desc_.parent_idx(); }
bool operator==(const paddle_mobile::framework::BlockDesc &in_block) const {
return this->ID() == in_block.ID() && this->Parent() == in_block.Parent();
}
  bool operator<(const paddle_mobile::framework::BlockDesc &in_block) const {
    return this->ID() < in_block.ID() ||
           (this->ID() == in_block.ID() && this->Parent() < in_block.Parent());
  }
std::vector<std::shared_ptr<VarDesc>> Vars() const;
std::vector<std::shared_ptr<OpDesc>> Ops() const;
private:
proto::BlockDesc desc_;
std::vector<std::shared_ptr<OpDesc>> ops_;
std::unordered_map<std::string, std::shared_ptr<VarDesc>> vars_;
};
} // namespace framework
} // namespace paddle_mobile
namespace std {
template <>
struct hash<paddle_mobile::framework::BlockDesc> {
typedef paddle_mobile::framework::BlockDesc argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const &s) const noexcept {
result_type const h1(std::hash<int>{}(s.ID()));
    result_type const h2(std::hash<int>{}(s.Parent()));
return h1 ^ (h2 << 1);
}
};
} // namespace std
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>
namespace paddle_mobile {
namespace framework {
enum class DataLayout {
kNHWC = 0,
kNCHW = 1,
kAnyLayout = 2,
};
inline DataLayout StringToDataLayout(const std::string& str) {
std::string s(str);
for (size_t i = 0; i < s.size(); ++i) {
s[i] = toupper(s[i]);
}
if (s == "NHWC") {
return DataLayout::kNHWC;
} else if (s == "NCHW") {
return DataLayout::kNCHW;
} else if (s == "ANYLAYOUT") {
return DataLayout::kAnyLayout;
  } else {
    throw std::invalid_argument("Unknown storage order string: " + s);
  }
}
inline std::string DataLayoutToString(const DataLayout& data_layout) {
switch (data_layout) {
case DataLayout::kNHWC:
return "NHWC";
case DataLayout::kNCHW:
return "NCHW";
case DataLayout::kAnyLayout:
return "ANY_LAYOUT";
    default:
      throw std::invalid_argument("Unknown DataLayout");
}
}
inline std::ostream& operator<<(std::ostream& out, const DataLayout& l) {
out << DataLayoutToString(l);
return out;
}
} // namespace framework
} // namespace paddle_mobile
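/* Usage sketch for DataLayout above — illustrative only; the function name
   and include path are assumptions. The string parser is case-insensitive,
   and DataLayoutToString round-trips the enum. */
#include <cassert>
#include "framework/data_layout.h"

inline void data_layout_usage_sketch() {
  using namespace paddle_mobile::framework;
  assert(StringToDataLayout("nchw") == DataLayout::kNCHW);
  assert(DataLayoutToString(DataLayout::kNHWC) == "NHWC");
}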
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "data_transform.h"
namespace paddle_mobile {
namespace framework {
static void PassTensorData(Tensor* from, Tensor* to) {
to->ShareDataWith(*from);
*from = Tensor();
}
void DataTransform(const OpKernelType& expected_kernel_type,
const OpKernelType& kernel_type_for_var,
const Tensor& input_tensor, Tensor* output_tensor) {
bool transformed = false;
Tensor in;
in.ShareDataWith(input_tensor);
Tensor out;
// // do layout transform
// if (NeedTransformLayout(expected_kernel_type.data_layout_,
// kernel_type_for_var.data_layout_)) {
// TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// // do data type transform
// if (expected_kernel_type.data_type_ != kernel_type_for_var.data_type_) {
// TransDataType(kernel_type_for_var, expected_kernel_type, in, &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// // do device transform
// if (!platform::is_same_place(kernel_type_for_var.place_,
// expected_kernel_type.place_)) {
// TransDataDevice(in, expected_kernel_type.place_, &out);
// transformed = true;
// PassTensorData(&out, &in);
// }
//
// PADDLE_ENFORCE(transformed, "No transform is applied, please check!");
// get output data
output_tensor->ShareDataWith(in);
}
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var) {
// if (in_var.IsType<LoDTensor>()) {
// auto& in_lod_tensor = in_var.Get<LoDTensor>();
// auto* tran_lod_tensor = out_var.GetMutable<LoDTensor>();
// tran_lod_tensor->set_lod(in_lod_tensor.lod());
// tran_lod_tensor->set_layout(in_lod_tensor.layout());
// tran_lod_tensor->ShareDataWith(tensor);
// } else if (in_var.IsType<SelectedRows>()) {
// auto& in_selected_rows = in_var.Get<SelectedRows>();
// auto* trans_selected_rows = out_var.GetMutable<SelectedRows>();
// trans_selected_rows->set_height(in_selected_rows.height());
// trans_selected_rows->set_rows(in_selected_rows.rows());
// trans_selected_rows->mutable_value()->ShareDataWith(tensor);
// } else {
// PADDLE_THROW("unknown var type");
// }
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <functional>
#include <utility>
#include <vector>
#include "op_kernel_type.h"
#include "selected_rows.h"
#include "tensor.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {
void DataTransform(const OpKernelType& expected_kernel_type,
const OpKernelType& kernel_type_for_var,
const Tensor& input_tensor, Tensor* out);
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var);
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework.pb.h"
namespace paddle_mobile {
namespace framework {
// inline proto::VarType::Type ToDataType(std::type_index type) {
// using namespace paddle_mobile::framework::proto;
// if (typeid(float).hash_code() == type.hash_code()) {
// return proto::VarType::FP32;
// } else if (typeid(double).hash_code() == type.hash_code()) {
// return proto::VarType::FP64;
// } else if (typeid(int).hash_code() == type.hash_code()) {
// return proto::VarType::INT32;
// } else if (typeid(int64_t).hash_code() == type.hash_code()) {
// return proto::VarType::INT64;
// } else if (typeid(bool).hash_code() == type.hash_code()) {
// return proto::VarType::BOOL;
// } else {
//// PADDLE_THROW("Not supported");
// }
// }
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ddim.h"
namespace paddle_mobile {
namespace framework {
/// @cond HIDDEN
template <int i>
Dim<i> make_dim(const int64_t* d) {
return Dim<i>(*d, make_dim<i - 1>(d + 1));
}
template <>
Dim<0> make_dim<0>(const int64_t* d) {
return Dim<0>(*d);
}
void make_ddim(DDim& ddim, const int64_t* dims, int n) {
switch (n) {
case 0:
ddim = make_dim<0>(dims);
break;
case 1:
ddim = make_dim<1>(dims);
break;
case 2:
ddim = make_dim<2>(dims);
break;
case 3:
ddim = make_dim<3>(dims);
break;
case 4:
ddim = make_dim<4>(dims);
break;
case 5:
ddim = make_dim<5>(dims);
break;
case 6:
ddim = make_dim<6>(dims);
break;
case 7:
ddim = make_dim<7>(dims);
break;
case 8:
ddim = make_dim<8>(dims);
break;
case 9:
ddim = make_dim<9>(dims);
break;
    default:
      // Dynamic dimensions must have between [1, 9] dimensions.
      break;
}
}
/// @endcond
DDim make_ddim(std::initializer_list<int64_t> dims) {
DDim result(make_dim(0));
make_ddim(result, dims.begin(), dims.size());
return result;
}
DDim make_ddim(const std::vector<int64_t>& dims) {
DDim result(make_dim(0));
make_ddim(result, &dims[0], dims.size());
return result;
}
DDim make_ddim(const std::vector<int>& dims) {
std::vector<int64_t> res(dims.size());
std::transform(dims.begin(), dims.end(), res.begin(),
[](int d) { return static_cast<int64_t>(d); });
return make_ddim(res);
}
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
struct DynamicMutableIndexer : Vistor<int64_t&> {
public:
explicit DynamicMutableIndexer(int idx) : idx_(idx) {}
template <int D>
int64_t& operator()(Dim<D>& dim) const {
return dim[idx_];
}
private:
int idx_;
};
struct DynamicConstIndexer : public Vistor<int64_t> {
public:
explicit DynamicConstIndexer(int idx) : idx_(idx) {}
template <int D>
int64_t operator()(const Dim<D>& dim) const {
return dim[idx_];
}
private:
int idx_;
};
/// @endcond
int64_t& DDim::operator[](int idx) {
return DDim::ApplyVistor(DynamicMutableIndexer(idx), *this);
}
int64_t DDim::operator[](int idx) const {
return DDim::ApplyVistor(DynamicConstIndexer(idx), *this);
}
int DDim::size() const { return arity(*this); }
bool DDim::operator==(DDim d) const {
  std::vector<int64_t> v1 = vectorize(*this);
  std::vector<int64_t> v2 = vectorize(d);
  if (v1.size() != v2.size()) {
    return false;
  }
  for (unsigned int i = 0; i < v1.size(); i++) {
    if (v1[i] != v2[i]) {
      return false;
    }
  }
  return true;
}
bool DDim::operator!=(DDim d) const { return !(*this == d); }
DDim DDim::operator+(DDim d) const {
std::vector<int64_t> v1 = vectorize(*this);
std::vector<int64_t> v2 = vectorize(d);
std::vector<int64_t> v3;
assert(v1.size() == v2.size());
for (unsigned int i = 0; i < v1.size(); i++) {
v3.push_back(v1[i] + v2[i]);
}
return make_ddim(v3);
}
DDim DDim::operator*(DDim d) const {
std::vector<int64_t> v1 = vectorize(*this);
std::vector<int64_t> v2 = vectorize(d);
std::vector<int64_t> v3;
assert(v1.size() == v2.size());
for (unsigned int i = 0; i < v1.size(); i++) {
v3.push_back(v1[i] * v2[i]);
}
return make_ddim(v3);
}
int64_t get(const DDim& ddim, int idx) { return ddim[idx]; }
void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }
/// @cond HIDDEN
struct VectorizeVisitor : Vistor<void> {
std::vector<int64_t>& vector;
explicit VectorizeVisitor(std::vector<int64_t>& v) : vector(v) {}
template <typename T>
void operator()(const T& t) {
vector.push_back(t.head);
this->operator()(t.tail);
}
void operator()(const Dim<0>& t) {}
};
/// @endcond
std::vector<int64_t> vectorize(const DDim& ddim) {
std::vector<int64_t> result;
VectorizeVisitor visitor(result);
DDim::ApplyVistor(visitor, ddim);
return result;
}
// NOTE: framework::vectorize converts to type int64_t
// which does not fit cudnn inputs.
std::vector<int> vectorize2int(const DDim& ddim) {
std::vector<int64_t> temp = vectorize(ddim);
std::vector<int> result(temp.begin(), temp.end());
return result;
}
struct ProductVisitor : Vistor<int64_t> {
template <int D>
int64_t operator()(const Dim<D>& dim) {
return product(dim);
}
};
int64_t product(const DDim& ddim) {
ProductVisitor visitor;
return DDim::ApplyVistor(visitor, ddim);
}
struct SliceVectorizeVisitor : Vistor<void> {
std::vector<int64_t>& vector;
int begin;
int end;
SliceVectorizeVisitor(std::vector<int64_t>& v, int b, int e)
: vector(v), begin(b), end(e) {
// PADDLE_ENFORCE(begin < end,
// "Begin index must be less than end index in ddim
// slice.");
// PADDLE_ENFORCE(begin >= 0,
// "Begin index can't be less than zero in ddim slice.");
}
template <int S>
void operator()(const Dim<S>& dim) {
if (begin == 0) {
vector.push_back(dim.head);
} else {
--begin;
}
--end;
if (end > 0) {
this->operator()(dim.tail);
}
}
void operator()(const Dim<0>& dim) {
// PADDLE_ENFORCE(end == 0, "End index in ddim slice is out of bound.");
}
};
DDim slice_ddim(const DDim& ddim, int begin, int end) {
std::vector<int64_t> vec;
vec.reserve(end - begin);
SliceVectorizeVisitor visitor(vec, begin, end);
// boost::apply_visitor(visitor, dim);
DDim::ApplyVistor(visitor, ddim);
// visitor(ddim.var.Get<Dim<4>>());
return make_ddim(vec);
}
/// \cond HIDDEN
struct ArityVisitor : Vistor<int> {
template <int D>
int operator()(Dim<D>) const {
return D;
}
};
/// \endcond
int arity(const DDim& d) {
ArityVisitor arityVisitor = ArityVisitor();
return DDim::ApplyVistor(arityVisitor, d);
// return arityVisitor(d.var.Get<Dim<4>>());
// return boost::apply_visitor(ArityVisitor(), d); }
}
/// \cond HIDDEN
/// \endcond
struct OSVistor : Vistor<std::ostream&> {
OSVistor(std::ostream& os) : os_(os) {}
template <int D>
std::ostream& operator()(Dim<D> dim) const {
return os_ << dim;
}
private:
std::ostream& os_;
};
std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
auto vistor = OSVistor(os);
DDim::ApplyVistor(vistor, ddim);
return os;
}
DDim::DDim(std::initializer_list<int64_t> init_list) {
*this = make_ddim(init_list);
}
DDim flatten_to_2d(const DDim& src, int num_col_dims) {
int rank = src.size();
return make_ddim({product(slice_ddim(src, 0, num_col_dims)),
product(slice_ddim(src, num_col_dims, rank))});
}
DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
DDim stride(const DDim& ddim) {
std::vector<int64_t> strides(ddim.size());
strides[ddim.size() - 1] = 1;
for (int i = ddim.size() - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * ddim[i + 1];
}
return framework::make_ddim(strides);
}
DDim stride_numel(const framework::DDim& ddim) {
std::vector<int64_t> strides(ddim.size());
strides[ddim.size() - 1] = ddim[ddim.size() - 1];
for (int i = ddim.size() - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * ddim[i];
}
return framework::make_ddim(strides);
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <assert.h>
#include <initializer_list>
#include <stdexcept>
#include <vector>
#include "common/variant.h"
#include "dim.h"
namespace paddle_mobile {
namespace framework {
/**
* \brief A dynamically sized dimension.
*
* The number of dimensions must be between [1, 9].
*/
struct DDim {
typedef Variant<Dim<0>, Dim<1>, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>,
Dim<7>, Dim<8>, Dim<9>>
DDimVar;
DDimVar var;
template <typename Vistor>
static typename Vistor::type_t ApplyVistor(Vistor vistor, const DDim &d) {
if (d.var.TypeId() == typeid(Dim<0>).hash_code()) {
return vistor(d.var.Get<Dim<0>>());
} else if (d.var.TypeId() == typeid(Dim<1>).hash_code()) {
return vistor(d.var.Get<Dim<1>>());
} else if (d.var.TypeId() == typeid(Dim<2>).hash_code()) {
return vistor(d.var.Get<Dim<2>>());
} else if (d.var.TypeId() == typeid(Dim<3>).hash_code()) {
return vistor(d.var.Get<Dim<3>>());
} else if (d.var.TypeId() == typeid(Dim<4>).hash_code()) {
return vistor(d.var.Get<Dim<4>>());
} else if (d.var.TypeId() == typeid(Dim<5>).hash_code()) {
return vistor(d.var.Get<Dim<5>>());
} else if (d.var.TypeId() == typeid(Dim<6>).hash_code()) {
return vistor(d.var.Get<Dim<6>>());
} else if (d.var.TypeId() == typeid(Dim<7>).hash_code()) {
return vistor(d.var.Get<Dim<7>>());
} else if (d.var.TypeId() == typeid(Dim<8>).hash_code()) {
return vistor(d.var.Get<Dim<8>>());
} else if (d.var.TypeId() == typeid(Dim<9>).hash_code()) {
return vistor(d.var.Get<Dim<9>>());
} else {
printf(" dim not support \n");
throw std::bad_exception();
// return typename Vistor::type_t();
}
}
DDim() { var.Set<Dim<1>>(Dim<1>()); }
template <int D>
explicit DDim(const Dim<D> &in) {
var.Set<Dim<D>>(in);
}
/*implicit*/ DDim(std::initializer_list<int64_t> init_list);
template <int D>
DDim &operator=(const Dim<D> &in) {
var.Set<Dim<D>>(in);
return *this;
}
int64_t &operator[](int idx);
int64_t operator[](int idx) const;
// template <typename Visitor>
// typename Visitor::result_type apply_visitor(Visitor& visitor) {
// return var.apply_visitor(visitor);
// }
//
// template <typename Visitor>
// typename Visitor::result_type apply_visitor(Visitor& visitor) const {
// return var.apply_visitor(visitor);
// }
DDimVar getVar() { return var; }
bool operator==(DDim d) const;
bool operator!=(DDim d) const;
DDim operator+(DDim d) const;
DDim operator*(DDim d) const;
int size() const;
};
/**
* \brief Make a DDim from std::vector<int64_t>
*
 * \param dims A vector of ints. Must be sized between [1, 9]
*/
DDim make_ddim(const std::vector<int64_t> &dims);
DDim make_ddim(const std::vector<int> &dims);
/**
* \brief Make a DDim from an initializer list
*
* \param dims An initializer list of ints. Must be sized between [1, 9]
*
*/
DDim make_ddim(std::initializer_list<int64_t> dims);
int64_t get(const DDim &dim, int idx);
void set(DDim &dim, int idx, int val);
std::vector<int64_t> vectorize(const DDim &ddim);
std::vector<int> vectorize2int(const DDim &ddim);
int64_t product(const DDim &ddim);
/**
* \brief Slice a ddim
*
* Slice dim with [begin, end).
* e.g. DDim d = make_ddim({1,2,3,4,5});
* slice_ddim(d, 1, 3); ====> {2,3}
*/
DDim slice_ddim(const DDim &dim, int begin, int end);
/**
* \brief What is the length of this dimension?
*
* \param Dynamic dimension to inspect
*/
int arity(const DDim &ddim);
std::ostream &operator<<(std::ostream &, const DDim &);
// Reshape a tensor to a matrix. The matrix's first dimension (column length)
// will be the product of tensor's first `num_col_dims` dimensions.
DDim flatten_to_2d(const DDim &src, int num_col_dims);
DDim flatten_to_1d(const DDim &src);
DDim stride(const DDim &ddim);
DDim stride_numel(const DDim &ddim);
} // namespace framework
} // namespace paddle_mobile
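/* Usage sketch for the DDim API above — illustrative only; the function name
   and include path are assumptions. */
#include <cassert>
#include "framework/ddim.h"

inline void ddim_usage_sketch() {
  using namespace paddle_mobile::framework;
  DDim d = make_ddim({2, 3, 4});
  assert(d.size() == 3);
  assert(product(d) == 24);         // 2 * 3 * 4 elements in total
  DDim tail = slice_ddim(d, 1, 3);  // [begin, end) -> {3, 4}
  assert(product(tail) == 12);
  DDim strides = stride(d);         // row-major strides: {12, 4, 1}
  assert(strides[0] == 12 && strides[1] == 4 && strides[2] == 1);
}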
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <type_traits>
#include "platform/hostdevice.h"
namespace paddle_mobile {
namespace framework {
// Statically sized, statically indexed dimension
template <int i>
struct Dim {
static constexpr int dimensions = i;
template <typename... Args>
HOSTDEVICE Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) {
static_assert(sizeof...(_tail) == i - 1,
"Dim initialized with the wrong number of parameters");
}
HOSTDEVICE
Dim(int64_t _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}
HOSTDEVICE
Dim() : head(0), tail() {}
/** Construct a Dim from a linear index and size. Uses Fortran order
* indexing. */
HOSTDEVICE
Dim(int64_t idx, const Dim<i>& size)
: head(idx % size.head), tail(idx / size.head, size.tail) {}
/** Construct a Dim with each dimension set to the given index */
HOSTDEVICE
Dim(int64_t idx) : head(idx), tail(idx) {}
HOSTDEVICE
bool operator==(const Dim<i>& o) const {
return (head == o.head) && (tail == o.tail);
}
HOSTDEVICE
bool operator!=(const Dim<i>& o) const { return !(*this == o); }
HOSTDEVICE
int64_t& operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
HOST std::string to_string() const;
int64_t head;
Dim<i - 1> tail;
};
// Base case specialization
template <>
struct Dim<0> {
static constexpr int dimensions = 0;
HOSTDEVICE
Dim(int64_t _head) {}
HOSTDEVICE
Dim() {}
HOSTDEVICE
Dim(int idx, const Dim<0>& size) {
#ifndef __CUDA_ARCH__
if (idx > 0) {
throw std::invalid_argument("Index out of range.");
}
#else
PADDLE_ASSERT(idx == 0);
#endif
}
HOSTDEVICE
bool operator==(const Dim<0>& o) const { return true; }
HOSTDEVICE
bool operator!=(const Dim<0>& o) const { return false; }
HOSTDEVICE
int64_t& operator[](int idx);
HOSTDEVICE
int64_t operator[](int idx) const;
};
namespace {
// Helper for accessing Dim classes
template <int i>
struct DimGetter {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D& d) {
return DimGetter<i - 1>::impl(d.tail);
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t& impl(D& d) {
return DimGetter<i - 1>::impl(d.tail);
}
};
// Eureka! We found the element!
template <>
struct DimGetter<0> {
// Return a copy if Dim is const
template <typename D>
HOSTDEVICE static int64_t impl(const D& d) {
return d.head;
}
// Return a reference if Dim is mutable
template <typename D>
HOSTDEVICE static int64_t& impl(D& d) {
return d.head;
}
};
template <int D>
HOSTDEVICE int64_t& indexer(Dim<D>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
}
#else
PADDLE_ASSERT(idx >= 0);
#endif
if (idx == 0) {
return dim.head;
}
return indexer(dim.tail, idx - 1);
}
template <>
HOSTDEVICE int64_t& indexer<0>(Dim<0>& dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
PADDLE_ASSERT(false);
#if CUDA_VERSION < 8000
// On CUDA versions previous to 8.0, only __shared__ variables
// could be declared as static in the device code.
int64_t head = 0;
#else
static int64_t head = 0;
#endif
return head;
#endif
}
template <int D>
HOSTDEVICE int64_t indexer(const Dim<D>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
}
#else
PADDLE_ASSERT(idx >= 0);
#endif
if (idx == 0) {
return dim.head;
}
return indexer(dim.tail, idx - 1);
}
template <>
HOSTDEVICE int64_t indexer<0>(const Dim<0>& dim, int idx) {
#ifndef __CUDA_ARCH__
throw std::invalid_argument("Invalid index");
#else
PADDLE_ASSERT(false);
#if CUDA_VERSION < 8000
// On CUDA versions previous to 8.0, only __shared__ variables
// could be declared as static in the device code.
int64_t head = 0;
#else
static int64_t head = 0;
#endif
return head;
#endif
}
} // namespace
// Static access to constant Dim
template <int i, int l>
HOSTDEVICE int64_t get(const Dim<l>& d) {
return DimGetter<i>::impl(d);
}
// Static access to mutable Dim
template <int i, int l>
HOSTDEVICE int64_t& get(Dim<l>& d) {
return DimGetter<i>::impl(d);
}
// Dynamic access to constant Dim
template <int l>
HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
// std::cout << "l: " << l << std::endl;
return indexer(*this, i);
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE int64_t& Dim<l>::operator[](int i) {
return indexer(*this, i);
}
// Dynamic access to constant Dim
inline HOSTDEVICE int64_t Dim<0>::operator[](int i) const {
return indexer(*this, i);
}
// Dynamic access to mutable Dim
inline HOSTDEVICE int64_t& Dim<0>::operator[](int i) {
return indexer(*this, i);
}
// Dynamic access to constant Dim
// without std::enable_if will try to instantiate this on get<0>(d)
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l>& d,
int i) {
return d[i];
}
// Dynamic access to mutable Dim
template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int64_t&>::type get(Dim<l>& d,
int i) {
return d[i];
}
// Dot product of two dims
template <int i>
HOSTDEVICE int64_t linearize(const Dim<i>& a, const Dim<i>& b) {
return a.head * b.head + linearize(a.tail, b.tail);
}
// Base case dot product of two Dims
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t linearize(const Dim<0>& a, const Dim<0>& b) {
return 0;
}
// Product of a Dim
template <int i>
HOSTDEVICE int64_t product(const Dim<i>& a, int prod = 1) {
return prod * a.head * product(a.tail);
}
// Base case product of a Dim
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline int64_t product(const Dim<0>& a, int prod) {
return prod;
}
// Is 0 <= idx_i < size_i for all i?
template <int i>
HOSTDEVICE bool contained(const Dim<i>& idx, const Dim<i>& size) {
return ((0 <= idx.head) && (idx.head < size.head) &&
contained(idx.tail, size.tail));
}
// Base case of is 0 <= idx_i < size_i ?
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline bool contained(const Dim<0>& idx, const Dim<0>& size) {
return true;
}
/**
* \brief Compute exclusive prefix-multiply of a Dim.
*/
template <int i>
HOSTDEVICE Dim<i> ex_prefix_mul(const Dim<i>& src, int mul = 1) {
return Dim<i>(mul, ex_prefix_mul(src.tail, mul * src.head));
}
///\cond HIDDEN
// Base case of ex_prefix_mul
// Notice it is inline because it is no longer a template
template <>
HOSTDEVICE inline Dim<0> ex_prefix_mul(const Dim<0>& src, int mul) {
return Dim<0>();
}
///\endcond
/**
* Add two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_plus(const Dim<i>& a, const Dim<i>& b) {
return Dim<i>(a.head + b.head, dim_plus(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_plus(const Dim<0>& a, const Dim<0>& b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator+(const Dim<i>& lhs, const Dim<i>& rhs) {
return dim_plus(lhs, rhs);
}
/**
* Multiply two dimensions together
*/
template <int i>
HOSTDEVICE Dim<i> dim_mult(const Dim<i>& a, const Dim<i>& b) {
return Dim<i>(a.head * b.head, dim_mult(a.tail, b.tail));
}
// Base case
template <>
HOSTDEVICE inline Dim<0> dim_mult(const Dim<0>& a, const Dim<0>& b) {
return Dim<0>();
}
template <int i>
HOSTDEVICE Dim<i> operator*(const Dim<i>& lhs, const Dim<i>& rhs) {
return dim_mult(lhs, rhs);
}
/**
* \brief Normalize strides to ensure any dimension with extent 1
* has stride 0.
*
* \param size Dim object containing the size of an array
* \param stride Dim object containing stride of an array
* \return Dim object the same size as \p size with normalized strides
*
*/
template <int i>
HOSTDEVICE Dim<i> normalize_strides(const Dim<i>& size, const Dim<i>& stride) {
int norm_stride = size.head == 1 ? 0 : stride.head;
return Dim<i>(norm_stride, normalize_strides(size.tail, stride.tail));
}
///\cond HIDDEN
template <>
HOSTDEVICE inline Dim<0> normalize_strides(const Dim<0>& size,
const Dim<0>& stride) {
return Dim<0>();
}
///\endcond
/**
* Helper function to create a Dim
*
* \param idxes The type of Dim constructed depends on the number of params
*
*/
template <typename... Args>
HOSTDEVICE Dim<sizeof...(Args)> make_dim(Args... idxes) {
return Dim<sizeof...(Args)>(idxes...);
}
// Allows us to output a Dim
// XXX For some reason, overloading fails to resolve this correctly
template <int i>
typename std::enable_if<(i > 1), std::ostream&>::type operator<<(
std::ostream& os, const Dim<i>& d) {
os << d.head << ", " << d.tail;
return os;
}
// Base case that allows us to output a Dim
// XXX I wish this could be an overload instead of a template
template <int i>
typename std::enable_if<(i == 1), std::ostream&>::type operator<<(
std::ostream& os, const Dim<i>& d) {
os << d.head;
return os;
}
inline std::ostream& operator<<(std::ostream& os, const Dim<0>& d) {
return os;
}
template <int i>
HOST std::string Dim<i>::to_string() const {
std::stringstream stream;
stream << *this;
return stream.str();
}
template <int D>
HOSTDEVICE Dim<D> linear_to_dimension(int linear_index, Dim<D> extents) {
Dim<D> result;
for (int i = 0; i < D - 1; ++i) {
result[i] = linear_index % extents[i];
linear_index /= extents[i];
}
result[D - 1] = linear_index;
return result;
}
} // namespace framework
} // namespace paddle_mobile
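/* Usage sketch for the statically sized Dim above — illustrative only; the
   function name and include path are assumptions. get<k> indexes at compile
   time, operator[] at run time, and the (index, size) constructor decodes a
   Fortran-order linear index that linearize() re-encodes. */
#include <cassert>
#include "framework/dim.h"

inline void dim_usage_sketch() {
  using namespace paddle_mobile::framework;
  Dim<3> extents = make_dim(2, 3, 4);
  assert(get<0>(extents) == 2);  // static index
  assert(extents[2] == 4);       // dynamic index
  Dim<3> idx(5, extents);        // decodes 5 -> (1, 2, 0) in Fortran order
  assert(linearize(idx, ex_prefix_mul(extents)) == 5);
}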
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "executor.h"
#include "lod_tensor.h"
#include "operators/conv_op.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
Executor<Dtype>::Executor(const Program<Dtype> p) : program_(p) {
if (use_optimize_) {
to_predict_program_ = program_.optimizeProgram;
} else {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
// std::cout << " **block size " << blocks.size() << std::endl;
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
// std::cout << " ops " << ops.size() << std::endl;
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
// std::cout << " input 0 " << op->Input("Input")[0] << std::endl;
if (op->Type() == "conv2d" && op->Input("Input")[0] == "pixel") {
// std::cout << " conv2d attr size: " << op->GetAttrMap().size()
// << std::endl;
// std::cout << " input size: " << op->GetInputs().size() <<
// std::endl;
// std::cout << " output size: " << op->GetOutputs().size() <<
// std::endl;
Attribute strides_attr = op->GetAttrMap().at("strides");
std::vector<int> stride = strides_attr.Get<std::vector<int>>();
for (int k = 0; k < stride.size(); ++k) {
// std::cout << " stride " << stride[k] << std::endl;
}
std::shared_ptr<operators::ConvOp<Dtype, float>> conv =
std::make_shared<operators::ConvOp<Dtype, float>>(
op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
program_.scope);
ops_of_block_[*block_desc.get()].push_back(conv);
}
}
}
}
template <typename Dtype>
std::shared_ptr<Tensor> Executor<Dtype>::predict(Tensor &t) {
// feed
auto scope = program_.scope;
Variable *g_feed_value = scope->Var("pixel");
auto tensor = g_feed_value->GetMutable<Tensor>();
tensor->ShareDataWith(t);
Variable *con_output = scope->Var("conv2d_0.tmp_0");
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>({1, 16, 32, 32});
// std::cout << typeid(output_tensor).name() << std::endl;
// std::cout << "output_tensor dims: " << output_tensor->dims() << std::endl;
  // output_tensor is owned by the scope's Variable; hand back a non-owning
  // shared_ptr so the caller cannot delete it a second time.
  std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});
predict(t, 0);
return out_tensor;
}
template <typename Dtype>
void Executor<Dtype>::predict(const Tensor &t, int block_id) {
std::shared_ptr<BlockDesc> to_predict_block =
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
// std::cout << "开始run" << std::endl;
op->Run();
}
}
template class Executor<CPU>;
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <map>
#include <string>
#include <vector>
#include "block_desc.h"
#include "framework.pb.h"
#include "operator.h"
#include "program.h"
#include "program_desc.h"
#include "scope.h"
#include "tensor.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class Executor {
public:
Executor(const Program<Dtype> p);
std::shared_ptr<Tensor> predict(Tensor &t);
private:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
void predict(const Tensor &t, int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
ops_of_block_;
bool use_optimize_ = false;
};
} // namespace framework
} // namespace paddle_mobile
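/* Usage sketch for the Executor above — illustrative only. It assumes a
   Program<CPU> already produced by the framework's model loader (defined
   elsewhere in this commit); the function name and include path are
   assumptions. */
#include <memory>
#include "framework/executor.h"

inline std::shared_ptr<paddle_mobile::framework::Tensor> run_model_sketch(
    const paddle_mobile::framework::Program<paddle_mobile::CPU> &program) {
  using namespace paddle_mobile::framework;
  Executor<paddle_mobile::CPU> executor(program);
  Tensor input;
  // Allocate a 1x3x32x32 float input (NCHW); real code would fill it with
  // image data before calling predict.
  input.mutable_data<float>({1, 3, 32, 32});
  return executor.predict(input);
}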
[source diff omitted: file too large to display; view the blob instead]
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: framework.proto
#ifndef PROTOBUF_framework_2eproto__INCLUDED
#define PROTOBUF_framework_2eproto__INCLUDED
#include <string>
#include <google/protobuf/stubs/common.h>
#if GOOGLE_PROTOBUF_VERSION < 3004000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3004000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/extension_set.h> // IWYU pragma: export
#include <google/protobuf/generated_enum_util.h>
#include <google/protobuf/generated_message_table_driven.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/repeated_field.h> // IWYU pragma: export
// @@protoc_insertion_point(includes)
namespace paddle_mobile {
namespace framework {
namespace proto {
class BlockDesc;
class BlockDescDefaultTypeInternal;
extern BlockDescDefaultTypeInternal _BlockDesc_default_instance_;
class OpDesc;
class OpDescDefaultTypeInternal;
extern OpDescDefaultTypeInternal _OpDesc_default_instance_;
class OpDesc_Attr;
class OpDesc_AttrDefaultTypeInternal;
extern OpDesc_AttrDefaultTypeInternal _OpDesc_Attr_default_instance_;
class OpDesc_Var;
class OpDesc_VarDefaultTypeInternal;
extern OpDesc_VarDefaultTypeInternal _OpDesc_Var_default_instance_;
class OpProto;
class OpProtoDefaultTypeInternal;
extern OpProtoDefaultTypeInternal _OpProto_default_instance_;
class OpProto_Attr;
class OpProto_AttrDefaultTypeInternal;
extern OpProto_AttrDefaultTypeInternal _OpProto_Attr_default_instance_;
class OpProto_Var;
class OpProto_VarDefaultTypeInternal;
extern OpProto_VarDefaultTypeInternal _OpProto_Var_default_instance_;
class ProgramDesc;
class ProgramDescDefaultTypeInternal;
extern ProgramDescDefaultTypeInternal _ProgramDesc_default_instance_;
class VarDesc;
class VarDescDefaultTypeInternal;
extern VarDescDefaultTypeInternal _VarDesc_default_instance_;
class VarType;
class VarTypeDefaultTypeInternal;
extern VarTypeDefaultTypeInternal _VarType_default_instance_;
class VarType_ChannelDesc;
class VarType_ChannelDescDefaultTypeInternal;
extern VarType_ChannelDescDefaultTypeInternal
_VarType_ChannelDesc_default_instance_;
class VarType_LoDTensorArrayDesc;
class VarType_LoDTensorArrayDescDefaultTypeInternal;
extern VarType_LoDTensorArrayDescDefaultTypeInternal
_VarType_LoDTensorArrayDesc_default_instance_;
class VarType_LoDTensorDesc;
class VarType_LoDTensorDescDefaultTypeInternal;
extern VarType_LoDTensorDescDefaultTypeInternal
_VarType_LoDTensorDesc_default_instance_;
class VarType_ReaderDesc;
class VarType_ReaderDescDefaultTypeInternal;
extern VarType_ReaderDescDefaultTypeInternal
_VarType_ReaderDesc_default_instance_;
class VarType_TensorDesc;
class VarType_TensorDescDefaultTypeInternal;
extern VarType_TensorDescDefaultTypeInternal
_VarType_TensorDesc_default_instance_;
class VarType_Tuple;
class VarType_TupleDefaultTypeInternal;
extern VarType_TupleDefaultTypeInternal _VarType_Tuple_default_instance_;
} // namespace proto
} // namespace framework
} // namespace paddle_mobile
namespace paddle_mobile {
namespace framework {
namespace proto {
namespace protobuf_framework_2eproto {
// Internal implementation detail -- do not call these.
struct TableStruct {
static const ::google::protobuf::internal::ParseTableField entries[];
static const ::google::protobuf::internal::AuxillaryParseTableField aux[];
static const ::google::protobuf::internal::ParseTable schema[];
static const ::google::protobuf::uint32 offsets[];
static const ::google::protobuf::internal::FieldMetadata field_metadata[];
static const ::google::protobuf::internal::SerializationTable
serialization_table[];
static void InitDefaultsImpl();
};
void AddDescriptors();
void InitDefaults();
} // namespace protobuf_framework_2eproto
enum VarType_Type {
VarType_Type_BOOL = 0,
VarType_Type_INT16 = 1,
VarType_Type_INT32 = 2,
VarType_Type_INT64 = 3,
VarType_Type_FP16 = 4,
VarType_Type_FP32 = 5,
VarType_Type_FP64 = 6,
VarType_Type_LOD_TENSOR = 7,
VarType_Type_SELECTED_ROWS = 8,
VarType_Type_FEED_MINIBATCH = 9,
VarType_Type_FETCH_LIST = 10,
VarType_Type_STEP_SCOPES = 11,
VarType_Type_LOD_RANK_TABLE = 12,
VarType_Type_LOD_TENSOR_ARRAY = 13,
VarType_Type_PLACE_LIST = 14,
VarType_Type_READER = 15,
VarType_Type_CHANNEL = 16,
VarType_Type_RAW = 17,
VarType_Type_TUPLE = 18
};
bool VarType_Type_IsValid(int value);
const VarType_Type VarType_Type_Type_MIN = VarType_Type_BOOL;
const VarType_Type VarType_Type_Type_MAX = VarType_Type_TUPLE;
const int VarType_Type_Type_ARRAYSIZE = VarType_Type_Type_MAX + 1;
enum AttrType {
INT = 0,
FLOAT = 1,
STRING = 2,
INTS = 3,
FLOATS = 4,
STRINGS = 5,
BOOLEAN = 6,
BOOLEANS = 7,
BLOCK = 8,
LONG = 9
};
bool AttrType_IsValid(int value);
const AttrType AttrType_MIN = INT;
const AttrType AttrType_MAX = LONG;
const int AttrType_ARRAYSIZE = AttrType_MAX + 1;
// ===================================================================
class OpDesc_Attr
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpDesc.Attr)
*/
{
public:
OpDesc_Attr();
virtual ~OpDesc_Attr();
OpDesc_Attr(const OpDesc_Attr& from);
inline OpDesc_Attr& operator=(const OpDesc_Attr& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpDesc_Attr(OpDesc_Attr&& from) noexcept : OpDesc_Attr() {
*this = ::std::move(from);
}
inline OpDesc_Attr& operator=(OpDesc_Attr&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpDesc_Attr& default_instance();
static inline const OpDesc_Attr* internal_default_instance() {
return reinterpret_cast<const OpDesc_Attr*>(
&_OpDesc_Attr_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 0;
void Swap(OpDesc_Attr* other);
friend void swap(OpDesc_Attr& a, OpDesc_Attr& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpDesc_Attr* New() const PROTOBUF_FINAL { return New(NULL); }
OpDesc_Attr* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpDesc_Attr& from);
void MergeFrom(const OpDesc_Attr& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpDesc_Attr* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated int32 ints = 6;
int ints_size() const;
void clear_ints();
static const int kIntsFieldNumber = 6;
::google::protobuf::int32 ints(int index) const;
void set_ints(int index, ::google::protobuf::int32 value);
void add_ints(::google::protobuf::int32 value);
const ::google::protobuf::RepeatedField<::google::protobuf::int32>& ints()
const;
::google::protobuf::RepeatedField<::google::protobuf::int32>* mutable_ints();
// repeated float floats = 7;
int floats_size() const;
void clear_floats();
static const int kFloatsFieldNumber = 7;
float floats(int index) const;
void set_floats(int index, float value);
void add_floats(float value);
const ::google::protobuf::RepeatedField<float>& floats() const;
::google::protobuf::RepeatedField<float>* mutable_floats();
// repeated string strings = 8;
int strings_size() const;
void clear_strings();
static const int kStringsFieldNumber = 8;
const ::std::string& strings(int index) const;
::std::string* mutable_strings(int index);
void set_strings(int index, const ::std::string& value);
#if LANG_CXX11
void set_strings(int index, ::std::string&& value);
#endif
void set_strings(int index, const char* value);
void set_strings(int index, const char* value, size_t size);
::std::string* add_strings();
void add_strings(const ::std::string& value);
#if LANG_CXX11
void add_strings(::std::string&& value);
#endif
void add_strings(const char* value);
void add_strings(const char* value, size_t size);
const ::google::protobuf::RepeatedPtrField<::std::string>& strings() const;
::google::protobuf::RepeatedPtrField<::std::string>* mutable_strings();
// repeated bool bools = 11;
int bools_size() const;
void clear_bools();
static const int kBoolsFieldNumber = 11;
bool bools(int index) const;
void set_bools(int index, bool value);
void add_bools(bool value);
const ::google::protobuf::RepeatedField<bool>& bools() const;
::google::protobuf::RepeatedField<bool>* mutable_bools();
// required string name = 1;
bool has_name() const;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
#if LANG_CXX11
void set_name(::std::string&& value);
#endif
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
// optional string s = 5;
bool has_s() const;
void clear_s();
static const int kSFieldNumber = 5;
const ::std::string& s() const;
void set_s(const ::std::string& value);
#if LANG_CXX11
void set_s(::std::string&& value);
#endif
void set_s(const char* value);
void set_s(const char* value, size_t size);
::std::string* mutable_s();
::std::string* release_s();
void set_allocated_s(::std::string* s);
// required .paddle_mobile.framework.proto.AttrType type = 2;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 2;
::paddle_mobile::framework::proto::AttrType type() const;
void set_type(::paddle_mobile::framework::proto::AttrType value);
// optional int32 i = 3;
bool has_i() const;
void clear_i();
static const int kIFieldNumber = 3;
::google::protobuf::int32 i() const;
void set_i(::google::protobuf::int32 value);
// optional float f = 4;
bool has_f() const;
void clear_f();
static const int kFFieldNumber = 4;
float f() const;
void set_f(float value);
// optional bool b = 10;
bool has_b() const;
void clear_b();
static const int kBFieldNumber = 10;
bool b() const;
void set_b(bool value);
// optional int64 l = 13;
bool has_l() const;
void clear_l();
static const int kLFieldNumber = 13;
::google::protobuf::int64 l() const;
void set_l(::google::protobuf::int64 value);
// optional int32 block_idx = 12;
bool has_block_idx() const;
void clear_block_idx();
static const int kBlockIdxFieldNumber = 12;
::google::protobuf::int32 block_idx() const;
void set_block_idx(::google::protobuf::int32 value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpDesc.Attr)
private:
void set_has_name();
void clear_has_name();
void set_has_type();
void clear_has_type();
void set_has_i();
void clear_has_i();
void set_has_f();
void clear_has_f();
void set_has_s();
void clear_has_s();
void set_has_b();
void clear_has_b();
void set_has_block_idx();
void clear_has_block_idx();
void set_has_l();
void clear_has_l();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedField<::google::protobuf::int32> ints_;
::google::protobuf::RepeatedField<float> floats_;
::google::protobuf::RepeatedPtrField<::std::string> strings_;
::google::protobuf::RepeatedField<bool> bools_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::internal::ArenaStringPtr s_;
int type_;
::google::protobuf::int32 i_;
float f_;
bool b_;
::google::protobuf::int64 l_;
::google::protobuf::int32 block_idx_;
friend struct protobuf_framework_2eproto::TableStruct;
};
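// Sketch of filling one attribute (names and values illustrative; only the
// generated accessors declared above are used):
//
//   OpDesc_Attr attr;
//   attr.set_name("axis");
//   attr.set_type(::paddle_mobile::framework::proto::INT);
//   attr.set_i(1);
//   attr.add_ints(2);  // repeated scalar fields append with add_*()
//   attr.add_ints(3);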
// -------------------------------------------------------------------
class OpDesc_Var
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpDesc.Var)
*/
{
public:
OpDesc_Var();
virtual ~OpDesc_Var();
OpDesc_Var(const OpDesc_Var& from);
inline OpDesc_Var& operator=(const OpDesc_Var& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpDesc_Var(OpDesc_Var&& from) noexcept : OpDesc_Var() {
*this = ::std::move(from);
}
inline OpDesc_Var& operator=(OpDesc_Var&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpDesc_Var& default_instance();
static inline const OpDesc_Var* internal_default_instance() {
return reinterpret_cast<const OpDesc_Var*>(&_OpDesc_Var_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 1;
void Swap(OpDesc_Var* other);
friend void swap(OpDesc_Var& a, OpDesc_Var& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpDesc_Var* New() const PROTOBUF_FINAL { return New(NULL); }
OpDesc_Var* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpDesc_Var& from);
void MergeFrom(const OpDesc_Var& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpDesc_Var* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated string arguments = 2;
int arguments_size() const;
void clear_arguments();
static const int kArgumentsFieldNumber = 2;
const ::std::string& arguments(int index) const;
::std::string* mutable_arguments(int index);
void set_arguments(int index, const ::std::string& value);
#if LANG_CXX11
void set_arguments(int index, ::std::string&& value);
#endif
void set_arguments(int index, const char* value);
void set_arguments(int index, const char* value, size_t size);
::std::string* add_arguments();
void add_arguments(const ::std::string& value);
#if LANG_CXX11
void add_arguments(::std::string&& value);
#endif
void add_arguments(const char* value);
void add_arguments(const char* value, size_t size);
const ::google::protobuf::RepeatedPtrField<::std::string>& arguments() const;
::google::protobuf::RepeatedPtrField<::std::string>* mutable_arguments();
// required string parameter = 1;
bool has_parameter() const;
void clear_parameter();
static const int kParameterFieldNumber = 1;
const ::std::string& parameter() const;
void set_parameter(const ::std::string& value);
#if LANG_CXX11
void set_parameter(::std::string&& value);
#endif
void set_parameter(const char* value);
void set_parameter(const char* value, size_t size);
::std::string* mutable_parameter();
::std::string* release_parameter();
void set_allocated_parameter(::std::string* parameter);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpDesc.Var)
private:
void set_has_parameter();
void clear_has_parameter();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<::std::string> arguments_;
::google::protobuf::internal::ArenaStringPtr parameter_;
friend struct protobuf_framework_2eproto::TableStruct;
};
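// Sketch (illustrative names): an OpDesc_Var binds one operator slot to the
// program variables that feed it; parameter is required, arguments repeats:
//
//   OpDesc_Var in;
//   in.set_parameter("Input");
//   in.add_arguments("conv2d_0.w_0");  // hypothetical variable name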
// -------------------------------------------------------------------
class OpDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpDesc)
*/
{
public:
OpDesc();
virtual ~OpDesc();
OpDesc(const OpDesc& from);
inline OpDesc& operator=(const OpDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpDesc(OpDesc&& from) noexcept : OpDesc() { *this = ::std::move(from); }
inline OpDesc& operator=(OpDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpDesc& default_instance();
static inline const OpDesc* internal_default_instance() {
return reinterpret_cast<const OpDesc*>(&_OpDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 2;
void Swap(OpDesc* other);
friend void swap(OpDesc& a, OpDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpDesc* New() const PROTOBUF_FINAL { return New(NULL); }
OpDesc* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpDesc& from);
void MergeFrom(const OpDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
typedef OpDesc_Attr Attr;
typedef OpDesc_Var Var;
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.OpDesc.Var inputs = 1;
int inputs_size() const;
void clear_inputs();
static const int kInputsFieldNumber = 1;
const ::paddle_mobile::framework::proto::OpDesc_Var& inputs(int index) const;
::paddle_mobile::framework::proto::OpDesc_Var* mutable_inputs(int index);
::paddle_mobile::framework::proto::OpDesc_Var* add_inputs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>*
mutable_inputs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>&
inputs() const;
// repeated .paddle_mobile.framework.proto.OpDesc.Var outputs = 2;
int outputs_size() const;
void clear_outputs();
static const int kOutputsFieldNumber = 2;
const ::paddle_mobile::framework::proto::OpDesc_Var& outputs(int index) const;
::paddle_mobile::framework::proto::OpDesc_Var* mutable_outputs(int index);
::paddle_mobile::framework::proto::OpDesc_Var* add_outputs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>*
mutable_outputs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>&
outputs() const;
// repeated .paddle_mobile.framework.proto.OpDesc.Attr attrs = 4;
int attrs_size() const;
void clear_attrs();
static const int kAttrsFieldNumber = 4;
const ::paddle_mobile::framework::proto::OpDesc_Attr& attrs(int index) const;
::paddle_mobile::framework::proto::OpDesc_Attr* mutable_attrs(int index);
::paddle_mobile::framework::proto::OpDesc_Attr* add_attrs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Attr>*
mutable_attrs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Attr>&
attrs() const;
// required string type = 3;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 3;
const ::std::string& type() const;
void set_type(const ::std::string& value);
#if LANG_CXX11
void set_type(::std::string&& value);
#endif
void set_type(const char* value);
void set_type(const char* value, size_t size);
::std::string* mutable_type();
::std::string* release_type();
void set_allocated_type(::std::string* type);
// optional bool is_target = 5 [default = false];
bool has_is_target() const;
void clear_is_target();
static const int kIsTargetFieldNumber = 5;
bool is_target() const;
void set_is_target(bool value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpDesc)
private:
void set_has_type();
void clear_has_type();
void set_has_is_target();
void clear_has_is_target();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>
inputs_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>
outputs_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Attr>
attrs_;
::google::protobuf::internal::ArenaStringPtr type_;
bool is_target_;
friend struct protobuf_framework_2eproto::TableStruct;
};
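// Sketch of assembling a complete operator description (all names and values
// illustrative):
//
//   OpDesc op;
//   op.set_type("mul");
//   op.add_inputs()->set_parameter("X");    // add_*() returns a mutable Var
//   op.add_outputs()->set_parameter("Out");
//   OpDesc_Attr* a = op.add_attrs();
//   a->set_name("x_num_col_dims");
//   a->set_type(::paddle_mobile::framework::proto::INT);
//   a->set_i(1);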
// -------------------------------------------------------------------
class OpProto_Var
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpProto.Var)
*/
{
public:
OpProto_Var();
virtual ~OpProto_Var();
OpProto_Var(const OpProto_Var& from);
inline OpProto_Var& operator=(const OpProto_Var& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpProto_Var(OpProto_Var&& from) noexcept : OpProto_Var() {
*this = ::std::move(from);
}
inline OpProto_Var& operator=(OpProto_Var&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpProto_Var& default_instance();
static inline const OpProto_Var* internal_default_instance() {
return reinterpret_cast<const OpProto_Var*>(
&_OpProto_Var_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 3;
void Swap(OpProto_Var* other);
friend void swap(OpProto_Var& a, OpProto_Var& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpProto_Var* New() const PROTOBUF_FINAL { return New(NULL); }
OpProto_Var* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpProto_Var& from);
void MergeFrom(const OpProto_Var& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpProto_Var* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required string name = 1;
bool has_name() const;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
#if LANG_CXX11
void set_name(::std::string&& value);
#endif
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
// required string comment = 2;
bool has_comment() const;
void clear_comment();
static const int kCommentFieldNumber = 2;
const ::std::string& comment() const;
void set_comment(const ::std::string& value);
#if LANG_CXX11
void set_comment(::std::string&& value);
#endif
void set_comment(const char* value);
void set_comment(const char* value, size_t size);
::std::string* mutable_comment();
::std::string* release_comment();
void set_allocated_comment(::std::string* comment);
// optional bool duplicable = 3 [default = false];
bool has_duplicable() const;
void clear_duplicable();
static const int kDuplicableFieldNumber = 3;
bool duplicable() const;
void set_duplicable(bool value);
// optional bool intermediate = 4 [default = false];
bool has_intermediate() const;
void clear_intermediate();
static const int kIntermediateFieldNumber = 4;
bool intermediate() const;
void set_intermediate(bool value);
// optional bool dispensable = 5 [default = false];
bool has_dispensable() const;
void clear_dispensable();
static const int kDispensableFieldNumber = 5;
bool dispensable() const;
void set_dispensable(bool value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpProto.Var)
private:
void set_has_name();
void clear_has_name();
void set_has_comment();
void clear_has_comment();
void set_has_duplicable();
void clear_has_duplicable();
void set_has_intermediate();
void clear_has_intermediate();
void set_has_dispensable();
void clear_has_dispensable();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::internal::ArenaStringPtr comment_;
bool duplicable_;
bool intermediate_;
bool dispensable_;
friend struct protobuf_framework_2eproto::TableStruct;
};
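// Sketch (illustrative): OpProto_Var describes one slot in an operator's
// schema rather than a concrete binding; the three optional bools mark slots
// that may repeat, are internal, or may be omitted:
//
//   OpProto_Var slot;
//   slot.set_name("X");
//   slot.set_comment("input tensor");  // name and comment are required
//   slot.set_duplicable(true);         // slot accepts a list of variables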
// -------------------------------------------------------------------
class OpProto_Attr
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpProto.Attr)
*/
{
public:
OpProto_Attr();
virtual ~OpProto_Attr();
OpProto_Attr(const OpProto_Attr& from);
inline OpProto_Attr& operator=(const OpProto_Attr& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpProto_Attr(OpProto_Attr&& from) noexcept : OpProto_Attr() {
*this = ::std::move(from);
}
inline OpProto_Attr& operator=(OpProto_Attr&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpProto_Attr& default_instance();
static inline const OpProto_Attr* internal_default_instance() {
return reinterpret_cast<const OpProto_Attr*>(
&_OpProto_Attr_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 4;
void Swap(OpProto_Attr* other);
friend void swap(OpProto_Attr& a, OpProto_Attr& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpProto_Attr* New() const PROTOBUF_FINAL { return New(NULL); }
OpProto_Attr* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpProto_Attr& from);
void MergeFrom(const OpProto_Attr& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpProto_Attr* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required string name = 1;
bool has_name() const;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
#if LANG_CXX11
void set_name(::std::string&& value);
#endif
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
// required string comment = 3;
bool has_comment() const;
void clear_comment();
static const int kCommentFieldNumber = 3;
const ::std::string& comment() const;
void set_comment(const ::std::string& value);
#if LANG_CXX11
void set_comment(::std::string&& value);
#endif
void set_comment(const char* value);
void set_comment(const char* value, size_t size);
::std::string* mutable_comment();
::std::string* release_comment();
void set_allocated_comment(::std::string* comment);
// required .paddle_mobile.framework.proto.AttrType type = 2;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 2;
::paddle_mobile::framework::proto::AttrType type() const;
void set_type(::paddle_mobile::framework::proto::AttrType value);
// optional bool generated = 4 [default = false];
bool has_generated() const;
void clear_generated();
static const int kGeneratedFieldNumber = 4;
bool generated() const;
void set_generated(bool value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpProto.Attr)
private:
void set_has_name();
void clear_has_name();
void set_has_type();
void clear_has_type();
void set_has_comment();
void clear_has_comment();
void set_has_generated();
void clear_has_generated();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::internal::ArenaStringPtr name_;
::google::protobuf::internal::ArenaStringPtr comment_;
int type_;
bool generated_;
friend struct protobuf_framework_2eproto::TableStruct;
};
// -------------------------------------------------------------------
class OpProto
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.OpProto)
*/
{
public:
OpProto();
virtual ~OpProto();
OpProto(const OpProto& from);
inline OpProto& operator=(const OpProto& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
OpProto(OpProto&& from) noexcept : OpProto() { *this = ::std::move(from); }
inline OpProto& operator=(OpProto&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const OpProto& default_instance();
static inline const OpProto* internal_default_instance() {
return reinterpret_cast<const OpProto*>(&_OpProto_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 5;
void Swap(OpProto* other);
friend void swap(OpProto& a, OpProto& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline OpProto* New() const PROTOBUF_FINAL { return New(NULL); }
OpProto* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const OpProto& from);
void MergeFrom(const OpProto& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(OpProto* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
typedef OpProto_Var Var;
typedef OpProto_Attr Attr;
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.OpProto.Var inputs = 2;
int inputs_size() const;
void clear_inputs();
static const int kInputsFieldNumber = 2;
const ::paddle_mobile::framework::proto::OpProto_Var& inputs(int index) const;
::paddle_mobile::framework::proto::OpProto_Var* mutable_inputs(int index);
::paddle_mobile::framework::proto::OpProto_Var* add_inputs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>*
mutable_inputs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>&
inputs() const;
// repeated .paddle_mobile.framework.proto.OpProto.Var outputs = 3;
int outputs_size() const;
void clear_outputs();
static const int kOutputsFieldNumber = 3;
const ::paddle_mobile::framework::proto::OpProto_Var& outputs(
int index) const;
::paddle_mobile::framework::proto::OpProto_Var* mutable_outputs(int index);
::paddle_mobile::framework::proto::OpProto_Var* add_outputs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>*
mutable_outputs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>&
outputs() const;
// repeated .paddle_mobile.framework.proto.OpProto.Attr attrs = 4;
int attrs_size() const;
void clear_attrs();
static const int kAttrsFieldNumber = 4;
const ::paddle_mobile::framework::proto::OpProto_Attr& attrs(int index) const;
::paddle_mobile::framework::proto::OpProto_Attr* mutable_attrs(int index);
::paddle_mobile::framework::proto::OpProto_Attr* add_attrs();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Attr>*
mutable_attrs();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Attr>&
attrs() const;
// required string type = 1;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 1;
const ::std::string& type() const;
void set_type(const ::std::string& value);
#if LANG_CXX11
void set_type(::std::string&& value);
#endif
void set_type(const char* value);
void set_type(const char* value, size_t size);
::std::string* mutable_type();
::std::string* release_type();
void set_allocated_type(::std::string* type);
// required string comment = 5;
bool has_comment() const;
void clear_comment();
static const int kCommentFieldNumber = 5;
const ::std::string& comment() const;
void set_comment(const ::std::string& value);
#if LANG_CXX11
void set_comment(::std::string&& value);
#endif
void set_comment(const char* value);
void set_comment(const char* value, size_t size);
::std::string* mutable_comment();
::std::string* release_comment();
void set_allocated_comment(::std::string* comment);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.OpProto)
private:
void set_has_type();
void clear_has_type();
void set_has_comment();
void clear_has_comment();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>
inputs_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>
outputs_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Attr>
attrs_;
::google::protobuf::internal::ArenaStringPtr type_;
::google::protobuf::internal::ArenaStringPtr comment_;
friend struct protobuf_framework_2eproto::TableStruct;
};
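// Sketch (illustrative): OpProto is the schema counterpart of OpDesc, pairing
// the required type and comment with the declared input, output, and
// attribute slots:
//
//   OpProto proto;
//   proto.set_type("mul");
//   proto.set_comment("matrix multiplication");
//   proto.add_inputs()->set_name("X");
//   proto.add_attrs()->set_name("x_num_col_dims");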
// -------------------------------------------------------------------
class VarType_TensorDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.TensorDesc)
*/
{
public:
VarType_TensorDesc();
virtual ~VarType_TensorDesc();
VarType_TensorDesc(const VarType_TensorDesc& from);
inline VarType_TensorDesc& operator=(const VarType_TensorDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_TensorDesc(VarType_TensorDesc&& from) noexcept
: VarType_TensorDesc() {
*this = ::std::move(from);
}
inline VarType_TensorDesc& operator=(VarType_TensorDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_TensorDesc& default_instance();
static inline const VarType_TensorDesc* internal_default_instance() {
return reinterpret_cast<const VarType_TensorDesc*>(
&_VarType_TensorDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 6;
void Swap(VarType_TensorDesc* other);
friend void swap(VarType_TensorDesc& a, VarType_TensorDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline VarType_TensorDesc* New() const PROTOBUF_FINAL { return New(NULL); }
VarType_TensorDesc* New(::google::protobuf::Arena* arena) const
PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_TensorDesc& from);
void MergeFrom(const VarType_TensorDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_TensorDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated int64 dims = 2;
int dims_size() const;
void clear_dims();
static const int kDimsFieldNumber = 2;
::google::protobuf::int64 dims(int index) const;
void set_dims(int index, ::google::protobuf::int64 value);
void add_dims(::google::protobuf::int64 value);
const ::google::protobuf::RepeatedField<::google::protobuf::int64>& dims()
const;
::google::protobuf::RepeatedField<::google::protobuf::int64>* mutable_dims();
// required .paddle_mobile.framework.proto.VarType.Type data_type = 1;
bool has_data_type() const;
void clear_data_type();
static const int kDataTypeFieldNumber = 1;
::paddle_mobile::framework::proto::VarType_Type data_type() const;
void set_data_type(::paddle_mobile::framework::proto::VarType_Type value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.TensorDesc)
private:
void set_has_data_type();
void clear_has_data_type();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedField<::google::protobuf::int64> dims_;
int data_type_;
friend struct protobuf_framework_2eproto::TableStruct;
};
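// Sketch (illustrative): a TensorDesc is just an element type plus a shape;
// by convention a dimension of -1 stands for a size resolved at runtime:
//
//   VarType_TensorDesc desc;
//   desc.set_data_type(VarType_Type_FP32);
//   desc.add_dims(-1);  // e.g. the batch dimension
//   desc.add_dims(224);
//   desc.add_dims(224);
//   desc.add_dims(3);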
// -------------------------------------------------------------------
class VarType_LoDTensorDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.LoDTensorDesc)
*/
{
public:
VarType_LoDTensorDesc();
virtual ~VarType_LoDTensorDesc();
VarType_LoDTensorDesc(const VarType_LoDTensorDesc& from);
inline VarType_LoDTensorDesc& operator=(const VarType_LoDTensorDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_LoDTensorDesc(VarType_LoDTensorDesc&& from) noexcept
: VarType_LoDTensorDesc() {
*this = ::std::move(from);
}
inline VarType_LoDTensorDesc& operator=(
VarType_LoDTensorDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_LoDTensorDesc& default_instance();
static inline const VarType_LoDTensorDesc* internal_default_instance() {
return reinterpret_cast<const VarType_LoDTensorDesc*>(
&_VarType_LoDTensorDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 7;
void Swap(VarType_LoDTensorDesc* other);
friend void swap(VarType_LoDTensorDesc& a, VarType_LoDTensorDesc& b) {
a.Swap(&b);
}
// implements Message ----------------------------------------------
inline VarType_LoDTensorDesc* New() const PROTOBUF_FINAL { return New(NULL); }
VarType_LoDTensorDesc* New(::google::protobuf::Arena* arena) const
PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_LoDTensorDesc& from);
void MergeFrom(const VarType_LoDTensorDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_LoDTensorDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required .paddle_mobile.framework.proto.VarType.TensorDesc tensor = 1;
bool has_tensor() const;
void clear_tensor();
static const int kTensorFieldNumber = 1;
const ::paddle_mobile::framework::proto::VarType_TensorDesc& tensor() const;
::paddle_mobile::framework::proto::VarType_TensorDesc* mutable_tensor();
::paddle_mobile::framework::proto::VarType_TensorDesc* release_tensor();
void set_allocated_tensor(
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor);
// optional int32 lod_level = 2 [default = 0];
bool has_lod_level() const;
void clear_lod_level();
static const int kLodLevelFieldNumber = 2;
::google::protobuf::int32 lod_level() const;
void set_lod_level(::google::protobuf::int32 value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.LoDTensorDesc)
private:
void set_has_tensor();
void clear_has_tensor();
void set_has_lod_level();
void clear_has_lod_level();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor_;
::google::protobuf::int32 lod_level_;
friend struct protobuf_framework_2eproto::TableStruct;
};
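// Sketch (illustrative): a LoDTensorDesc wraps a TensorDesc with the number
// of level-of-detail (LoD) nesting levels used for variable-length data:
//
//   VarType_LoDTensorDesc lod;
//   lod.mutable_tensor()->set_data_type(VarType_Type_FP32);
//   lod.mutable_tensor()->add_dims(-1);
//   lod.set_lod_level(1);  // one nesting level, e.g. a batch of sequences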
// -------------------------------------------------------------------
class VarType_LoDTensorArrayDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc)
*/
{
public:
VarType_LoDTensorArrayDesc();
virtual ~VarType_LoDTensorArrayDesc();
VarType_LoDTensorArrayDesc(const VarType_LoDTensorArrayDesc& from);
inline VarType_LoDTensorArrayDesc& operator=(
const VarType_LoDTensorArrayDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_LoDTensorArrayDesc(VarType_LoDTensorArrayDesc&& from) noexcept
: VarType_LoDTensorArrayDesc() {
*this = ::std::move(from);
}
inline VarType_LoDTensorArrayDesc& operator=(
VarType_LoDTensorArrayDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_LoDTensorArrayDesc& default_instance();
static inline const VarType_LoDTensorArrayDesc* internal_default_instance() {
return reinterpret_cast<const VarType_LoDTensorArrayDesc*>(
&_VarType_LoDTensorArrayDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 8;
void Swap(VarType_LoDTensorArrayDesc* other);
friend void swap(VarType_LoDTensorArrayDesc& a,
VarType_LoDTensorArrayDesc& b) {
a.Swap(&b);
}
// implements Message ----------------------------------------------
inline VarType_LoDTensorArrayDesc* New() const PROTOBUF_FINAL {
return New(NULL);
}
VarType_LoDTensorArrayDesc* New(::google::protobuf::Arena* arena) const
PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_LoDTensorArrayDesc& from);
void MergeFrom(const VarType_LoDTensorArrayDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_LoDTensorArrayDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required .paddle_mobile.framework.proto.VarType.TensorDesc tensor = 1;
bool has_tensor() const;
void clear_tensor();
static const int kTensorFieldNumber = 1;
const ::paddle_mobile::framework::proto::VarType_TensorDesc& tensor() const;
::paddle_mobile::framework::proto::VarType_TensorDesc* mutable_tensor();
::paddle_mobile::framework::proto::VarType_TensorDesc* release_tensor();
void set_allocated_tensor(
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor);
// optional int32 lod_level = 2 [default = 0];
bool has_lod_level() const;
void clear_lod_level();
static const int kLodLevelFieldNumber = 2;
::google::protobuf::int32 lod_level() const;
void set_lod_level(::google::protobuf::int32 value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc)
private:
void set_has_tensor();
void clear_has_tensor();
void set_has_lod_level();
void clear_has_lod_level();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor_;
::google::protobuf::int32 lod_level_;
friend struct protobuf_framework_2eproto::TableStruct;
};
// -------------------------------------------------------------------
class VarType_ReaderDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.ReaderDesc)
*/
{
public:
VarType_ReaderDesc();
virtual ~VarType_ReaderDesc();
VarType_ReaderDesc(const VarType_ReaderDesc& from);
inline VarType_ReaderDesc& operator=(const VarType_ReaderDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_ReaderDesc(VarType_ReaderDesc&& from) noexcept
: VarType_ReaderDesc() {
*this = ::std::move(from);
}
inline VarType_ReaderDesc& operator=(VarType_ReaderDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_ReaderDesc& default_instance();
static inline const VarType_ReaderDesc* internal_default_instance() {
return reinterpret_cast<const VarType_ReaderDesc*>(
&_VarType_ReaderDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 9;
void Swap(VarType_ReaderDesc* other);
friend void swap(VarType_ReaderDesc& a, VarType_ReaderDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline VarType_ReaderDesc* New() const PROTOBUF_FINAL { return New(NULL); }
VarType_ReaderDesc* New(::google::protobuf::Arena* arena) const
PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_ReaderDesc& from);
void MergeFrom(const VarType_ReaderDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_ReaderDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.VarType.LoDTensorDesc lod_tensor = 1;
int lod_tensor_size() const;
void clear_lod_tensor();
static const int kLodTensorFieldNumber = 1;
const ::paddle_mobile::framework::proto::VarType_LoDTensorDesc& lod_tensor(
int index) const;
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* mutable_lod_tensor(
int index);
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* add_lod_tensor();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarType_LoDTensorDesc>*
mutable_lod_tensor();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarType_LoDTensorDesc>&
lod_tensor() const;
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.ReaderDesc)
private:
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarType_LoDTensorDesc>
lod_tensor_;
friend struct protobuf_framework_2eproto::TableStruct;
};
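// Sketch (illustrative): a ReaderDesc holds one LoDTensorDesc per tensor the
// reader yields each step:
//
//   VarType_ReaderDesc reader;
//   reader.add_lod_tensor()->mutable_tensor()->set_data_type(
//       VarType_Type_FP32);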
// -------------------------------------------------------------------
class VarType_ChannelDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.ChannelDesc)
*/
{
public:
VarType_ChannelDesc();
virtual ~VarType_ChannelDesc();
VarType_ChannelDesc(const VarType_ChannelDesc& from);
inline VarType_ChannelDesc& operator=(const VarType_ChannelDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_ChannelDesc(VarType_ChannelDesc&& from) noexcept
: VarType_ChannelDesc() {
*this = ::std::move(from);
}
inline VarType_ChannelDesc& operator=(VarType_ChannelDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_ChannelDesc& default_instance();
static inline const VarType_ChannelDesc* internal_default_instance() {
return reinterpret_cast<const VarType_ChannelDesc*>(
&_VarType_ChannelDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 10;
void Swap(VarType_ChannelDesc* other);
friend void swap(VarType_ChannelDesc& a, VarType_ChannelDesc& b) {
a.Swap(&b);
}
// implements Message ----------------------------------------------
inline VarType_ChannelDesc* New() const PROTOBUF_FINAL { return New(NULL); }
VarType_ChannelDesc* New(::google::protobuf::Arena* arena) const
PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_ChannelDesc& from);
void MergeFrom(const VarType_ChannelDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_ChannelDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required int64 capacity = 2;
bool has_capacity() const;
void clear_capacity();
static const int kCapacityFieldNumber = 2;
::google::protobuf::int64 capacity() const;
void set_capacity(::google::protobuf::int64 value);
// required .paddle_mobile.framework.proto.VarType.Type data_type = 1;
bool has_data_type() const;
void clear_data_type();
static const int kDataTypeFieldNumber = 1;
::paddle_mobile::framework::proto::VarType_Type data_type() const;
void set_data_type(::paddle_mobile::framework::proto::VarType_Type value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.ChannelDesc)
private:
void set_has_data_type();
void clear_has_data_type();
void set_has_capacity();
void clear_has_capacity();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::int64 capacity_;
int data_type_;
friend struct protobuf_framework_2eproto::TableStruct;
};
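// Sketch (illustrative): a ChannelDesc pairs an element type with a buffer
// capacity; both fields are required, so IsInitialized() stays false until
// both are set:
//
//   VarType_ChannelDesc chan;
//   chan.set_data_type(VarType_Type_FP32);
//   chan.set_capacity(64);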
// -------------------------------------------------------------------
class VarType_Tuple
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType.Tuple)
*/
{
public:
VarType_Tuple();
virtual ~VarType_Tuple();
VarType_Tuple(const VarType_Tuple& from);
inline VarType_Tuple& operator=(const VarType_Tuple& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType_Tuple(VarType_Tuple&& from) noexcept : VarType_Tuple() {
*this = ::std::move(from);
}
inline VarType_Tuple& operator=(VarType_Tuple&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType_Tuple& default_instance();
static inline const VarType_Tuple* internal_default_instance() {
return reinterpret_cast<const VarType_Tuple*>(
&_VarType_Tuple_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 11;
void Swap(VarType_Tuple* other);
friend void swap(VarType_Tuple& a, VarType_Tuple& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline VarType_Tuple* New() const PROTOBUF_FINAL { return New(NULL); }
VarType_Tuple* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType_Tuple& from);
void MergeFrom(const VarType_Tuple& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType_Tuple* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.VarType.Type element_type = 1;
int element_type_size() const;
void clear_element_type();
static const int kElementTypeFieldNumber = 1;
::paddle_mobile::framework::proto::VarType_Type element_type(int index) const;
void set_element_type(int index,
::paddle_mobile::framework::proto::VarType_Type value);
void add_element_type(::paddle_mobile::framework::proto::VarType_Type value);
const ::google::protobuf::RepeatedField<int>& element_type() const;
::google::protobuf::RepeatedField<int>* mutable_element_type();
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType.Tuple)
private:
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedField<int> element_type_;
friend struct protobuf_framework_2eproto::TableStruct;
};
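// Sketch (illustrative): a Tuple is a repeated list of element types; the
// field is stored as RepeatedField<int>, so the typed accessors above do the
// enum casting:
//
//   VarType_Tuple tuple;
//   tuple.add_element_type(VarType_Type_INT64);
//   tuple.add_element_type(VarType_Type_FP32);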
// -------------------------------------------------------------------
class VarType
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarType)
*/
{
public:
VarType();
virtual ~VarType();
VarType(const VarType& from);
inline VarType& operator=(const VarType& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarType(VarType&& from) noexcept : VarType() { *this = ::std::move(from); }
inline VarType& operator=(VarType&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarType& default_instance();
static inline const VarType* internal_default_instance() {
return reinterpret_cast<const VarType*>(&_VarType_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 12;
void Swap(VarType* other);
friend void swap(VarType& a, VarType& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline VarType* New() const PROTOBUF_FINAL { return New(NULL); }
VarType* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarType& from);
void MergeFrom(const VarType& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarType* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
typedef VarType_TensorDesc TensorDesc;
typedef VarType_LoDTensorDesc LoDTensorDesc;
typedef VarType_LoDTensorArrayDesc LoDTensorArrayDesc;
typedef VarType_ReaderDesc ReaderDesc;
typedef VarType_ChannelDesc ChannelDesc;
typedef VarType_Tuple Tuple;
typedef VarType_Type Type;
static const Type BOOL = VarType_Type_BOOL;
static const Type INT16 = VarType_Type_INT16;
static const Type INT32 = VarType_Type_INT32;
static const Type INT64 = VarType_Type_INT64;
static const Type FP16 = VarType_Type_FP16;
static const Type FP32 = VarType_Type_FP32;
static const Type FP64 = VarType_Type_FP64;
static const Type LOD_TENSOR = VarType_Type_LOD_TENSOR;
static const Type SELECTED_ROWS = VarType_Type_SELECTED_ROWS;
static const Type FEED_MINIBATCH = VarType_Type_FEED_MINIBATCH;
static const Type FETCH_LIST = VarType_Type_FETCH_LIST;
static const Type STEP_SCOPES = VarType_Type_STEP_SCOPES;
static const Type LOD_RANK_TABLE = VarType_Type_LOD_RANK_TABLE;
static const Type LOD_TENSOR_ARRAY = VarType_Type_LOD_TENSOR_ARRAY;
static const Type PLACE_LIST = VarType_Type_PLACE_LIST;
static const Type READER = VarType_Type_READER;
static const Type CHANNEL = VarType_Type_CHANNEL;
static const Type RAW = VarType_Type_RAW;
static const Type TUPLE = VarType_Type_TUPLE;
static inline bool Type_IsValid(int value) {
return VarType_Type_IsValid(value);
}
static const Type Type_MIN = VarType_Type_Type_MIN;
static const Type Type_MAX = VarType_Type_Type_MAX;
static const int Type_ARRAYSIZE = VarType_Type_Type_ARRAYSIZE;
// accessors -------------------------------------------------------
  // optional .paddle_mobile.framework.proto.VarType.TensorDesc selected_rows = 2;
bool has_selected_rows() const;
void clear_selected_rows();
static const int kSelectedRowsFieldNumber = 2;
const ::paddle_mobile::framework::proto::VarType_TensorDesc& selected_rows()
const;
::paddle_mobile::framework::proto::VarType_TensorDesc*
mutable_selected_rows();
::paddle_mobile::framework::proto::VarType_TensorDesc*
release_selected_rows();
void set_allocated_selected_rows(
::paddle_mobile::framework::proto::VarType_TensorDesc* selected_rows);
  // optional .paddle_mobile.framework.proto.VarType.LoDTensorDesc lod_tensor = 3;
bool has_lod_tensor() const;
void clear_lod_tensor();
static const int kLodTensorFieldNumber = 3;
const ::paddle_mobile::framework::proto::VarType_LoDTensorDesc& lod_tensor()
const;
::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
mutable_lod_tensor();
::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
release_lod_tensor();
void set_allocated_lod_tensor(
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* lod_tensor);
  // optional .paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc tensor_array = 4;
bool has_tensor_array() const;
void clear_tensor_array();
static const int kTensorArrayFieldNumber = 4;
const ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc&
tensor_array() const;
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
mutable_tensor_array();
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
release_tensor_array();
void set_allocated_tensor_array(
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
tensor_array);
// optional .paddle_mobile.framework.proto.VarType.ReaderDesc reader = 5;
bool has_reader() const;
void clear_reader();
static const int kReaderFieldNumber = 5;
const ::paddle_mobile::framework::proto::VarType_ReaderDesc& reader() const;
::paddle_mobile::framework::proto::VarType_ReaderDesc* mutable_reader();
::paddle_mobile::framework::proto::VarType_ReaderDesc* release_reader();
void set_allocated_reader(
::paddle_mobile::framework::proto::VarType_ReaderDesc* reader);
// optional .paddle_mobile.framework.proto.VarType.ChannelDesc channel = 6;
bool has_channel() const;
void clear_channel();
static const int kChannelFieldNumber = 6;
const ::paddle_mobile::framework::proto::VarType_ChannelDesc& channel() const;
::paddle_mobile::framework::proto::VarType_ChannelDesc* mutable_channel();
::paddle_mobile::framework::proto::VarType_ChannelDesc* release_channel();
void set_allocated_channel(
::paddle_mobile::framework::proto::VarType_ChannelDesc* channel);
// optional .paddle_mobile.framework.proto.VarType.Tuple tuple = 7;
bool has_tuple() const;
void clear_tuple();
static const int kTupleFieldNumber = 7;
const ::paddle_mobile::framework::proto::VarType_Tuple& tuple() const;
::paddle_mobile::framework::proto::VarType_Tuple* mutable_tuple();
::paddle_mobile::framework::proto::VarType_Tuple* release_tuple();
void set_allocated_tuple(
::paddle_mobile::framework::proto::VarType_Tuple* tuple);
// required .paddle_mobile.framework.proto.VarType.Type type = 1;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 1;
::paddle_mobile::framework::proto::VarType_Type type() const;
void set_type(::paddle_mobile::framework::proto::VarType_Type value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarType)
private:
void set_has_type();
void clear_has_type();
void set_has_selected_rows();
void clear_has_selected_rows();
void set_has_lod_tensor();
void clear_has_lod_tensor();
void set_has_tensor_array();
void clear_has_tensor_array();
void set_has_reader();
void clear_has_reader();
void set_has_channel();
void clear_has_channel();
void set_has_tuple();
void clear_has_tuple();
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::paddle_mobile::framework::proto::VarType_TensorDesc* selected_rows_;
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* lod_tensor_;
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc* tensor_array_;
::paddle_mobile::framework::proto::VarType_ReaderDesc* reader_;
::paddle_mobile::framework::proto::VarType_ChannelDesc* channel_;
::paddle_mobile::framework::proto::VarType_Tuple* tuple_;
int type_;
friend struct protobuf_framework_2eproto::TableStruct;
};
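// Editor's note: usage sketch, not protoc output. A VarType pairs the
// required Type tag with at most one populated *Desc submessage; each
// mutable_*() accessor allocates its submessage on first use and flips the
// corresponding has-bit, so the matching has_*() returns true afterwards:
//
//   paddle_mobile::framework::proto::VarType vt;
//   vt.set_type(paddle_mobile::framework::proto::VarType::LOD_TENSOR);
//   paddle_mobile::framework::proto::VarType_LoDTensorDesc* lod =
//       vt.mutable_lod_tensor();          // allocated lazily, sets has-bit
//   bool present = vt.has_lod_tensor();   // true
//   (void)lod; (void)present;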
// -------------------------------------------------------------------
class VarDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.VarDesc)
*/
{
public:
VarDesc();
virtual ~VarDesc();
VarDesc(const VarDesc& from);
inline VarDesc& operator=(const VarDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
VarDesc(VarDesc&& from) noexcept : VarDesc() { *this = ::std::move(from); }
inline VarDesc& operator=(VarDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const VarDesc& default_instance();
static inline const VarDesc* internal_default_instance() {
return reinterpret_cast<const VarDesc*>(&_VarDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 13;
void Swap(VarDesc* other);
friend void swap(VarDesc& a, VarDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline VarDesc* New() const PROTOBUF_FINAL { return New(NULL); }
VarDesc* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const VarDesc& from);
void MergeFrom(const VarDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(VarDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// required string name = 1;
bool has_name() const;
void clear_name();
static const int kNameFieldNumber = 1;
const ::std::string& name() const;
void set_name(const ::std::string& value);
#if LANG_CXX11
void set_name(::std::string&& value);
#endif
void set_name(const char* value);
void set_name(const char* value, size_t size);
::std::string* mutable_name();
::std::string* release_name();
void set_allocated_name(::std::string* name);
// required .paddle_mobile.framework.proto.VarType type = 2;
bool has_type() const;
void clear_type();
static const int kTypeFieldNumber = 2;
const ::paddle_mobile::framework::proto::VarType& type() const;
::paddle_mobile::framework::proto::VarType* mutable_type();
::paddle_mobile::framework::proto::VarType* release_type();
void set_allocated_type(::paddle_mobile::framework::proto::VarType* type);
// optional bool persistable = 3 [default = false];
bool has_persistable() const;
void clear_persistable();
static const int kPersistableFieldNumber = 3;
bool persistable() const;
void set_persistable(bool value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.VarDesc)
private:
void set_has_name();
void clear_has_name();
void set_has_type();
void clear_has_type();
void set_has_persistable();
void clear_has_persistable();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::internal::ArenaStringPtr name_;
::paddle_mobile::framework::proto::VarType* type_;
bool persistable_;
friend struct protobuf_framework_2eproto::TableStruct;
};
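// Editor's note: usage sketch, not protoc output. name and type are
// required fields, so IsInitialized() only returns true once both are set
// (type is itself a message carrying its own required Type tag). Field
// values below are illustrative, not taken from the source:
//
//   paddle_mobile::framework::proto::VarDesc var;
//   var.set_name("fc_0.w_0");
//   var.set_persistable(true);
//   var.mutable_type()->set_type(
//       paddle_mobile::framework::proto::VarType::LOD_TENSOR);
//   bool ready = var.IsInitialized();  // true: both required fields set
//   (void)ready;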
// -------------------------------------------------------------------
class BlockDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.BlockDesc)
*/
{
public:
BlockDesc();
virtual ~BlockDesc();
BlockDesc(const BlockDesc& from);
inline BlockDesc& operator=(const BlockDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
BlockDesc(BlockDesc&& from) noexcept : BlockDesc() {
*this = ::std::move(from);
}
inline BlockDesc& operator=(BlockDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const BlockDesc& default_instance();
static inline const BlockDesc* internal_default_instance() {
return reinterpret_cast<const BlockDesc*>(&_BlockDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 14;
void Swap(BlockDesc* other);
friend void swap(BlockDesc& a, BlockDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline BlockDesc* New() const PROTOBUF_FINAL { return New(NULL); }
BlockDesc* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const BlockDesc& from);
void MergeFrom(const BlockDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(BlockDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.VarDesc vars = 3;
int vars_size() const;
void clear_vars();
static const int kVarsFieldNumber = 3;
const ::paddle_mobile::framework::proto::VarDesc& vars(int index) const;
::paddle_mobile::framework::proto::VarDesc* mutable_vars(int index);
::paddle_mobile::framework::proto::VarDesc* add_vars();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarDesc>*
mutable_vars();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarDesc>&
vars() const;
// repeated .paddle_mobile.framework.proto.OpDesc ops = 4;
int ops_size() const;
void clear_ops();
static const int kOpsFieldNumber = 4;
const ::paddle_mobile::framework::proto::OpDesc& ops(int index) const;
::paddle_mobile::framework::proto::OpDesc* mutable_ops(int index);
::paddle_mobile::framework::proto::OpDesc* add_ops();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc>*
mutable_ops();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc>&
ops() const;
// required int32 idx = 1;
bool has_idx() const;
void clear_idx();
static const int kIdxFieldNumber = 1;
::google::protobuf::int32 idx() const;
void set_idx(::google::protobuf::int32 value);
// required int32 parent_idx = 2;
bool has_parent_idx() const;
void clear_parent_idx();
static const int kParentIdxFieldNumber = 2;
::google::protobuf::int32 parent_idx() const;
void set_parent_idx(::google::protobuf::int32 value);
// optional int32 forward_block_idx = 5 [default = -1];
bool has_forward_block_idx() const;
void clear_forward_block_idx();
static const int kForwardBlockIdxFieldNumber = 5;
::google::protobuf::int32 forward_block_idx() const;
void set_forward_block_idx(::google::protobuf::int32 value);
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.BlockDesc)
private:
void set_has_idx();
void clear_has_idx();
void set_has_parent_idx();
void clear_has_parent_idx();
void set_has_forward_block_idx();
void clear_has_forward_block_idx();
// helper for ByteSizeLong()
size_t RequiredFieldsByteSizeFallback() const;
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarDesc>
vars_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc>
ops_;
::google::protobuf::int32 idx_;
::google::protobuf::int32 parent_idx_;
::google::protobuf::int32 forward_block_idx_;
friend struct protobuf_framework_2eproto::TableStruct;
};
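// Editor's note: usage sketch, not protoc output. add_vars()/add_ops()
// append to the underlying RepeatedPtrField and hand back a mutable
// element; idx and parent_idx are required int32 fields (-1 is the
// conventional "no parent" value for a root block, an assumption here):
//
//   paddle_mobile::framework::proto::BlockDesc blk;
//   blk.set_idx(0);
//   blk.set_parent_idx(-1);
//   paddle_mobile::framework::proto::VarDesc* v = blk.add_vars();
//   v->set_name("feed");  // example name, not from the source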
// -------------------------------------------------------------------
class ProgramDesc
: public ::google::protobuf::
MessageLite /* @@protoc_insertion_point(class_definition:paddle_mobile.framework.proto.ProgramDesc)
*/
{
public:
ProgramDesc();
virtual ~ProgramDesc();
ProgramDesc(const ProgramDesc& from);
inline ProgramDesc& operator=(const ProgramDesc& from) {
CopyFrom(from);
return *this;
}
#if LANG_CXX11
ProgramDesc(ProgramDesc&& from) noexcept : ProgramDesc() {
*this = ::std::move(from);
}
inline ProgramDesc& operator=(ProgramDesc&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
#endif
inline const ::std::string& unknown_fields() const {
return _internal_metadata_.unknown_fields();
}
inline ::std::string* mutable_unknown_fields() {
return _internal_metadata_.mutable_unknown_fields();
}
static const ProgramDesc& default_instance();
static inline const ProgramDesc* internal_default_instance() {
return reinterpret_cast<const ProgramDesc*>(
&_ProgramDesc_default_instance_);
}
static PROTOBUF_CONSTEXPR int const kIndexInFileMessages = 15;
void Swap(ProgramDesc* other);
friend void swap(ProgramDesc& a, ProgramDesc& b) { a.Swap(&b); }
// implements Message ----------------------------------------------
inline ProgramDesc* New() const PROTOBUF_FINAL { return New(NULL); }
ProgramDesc* New(::google::protobuf::Arena* arena) const PROTOBUF_FINAL;
void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from)
PROTOBUF_FINAL;
void CopyFrom(const ProgramDesc& from);
void MergeFrom(const ProgramDesc& from);
void Clear() PROTOBUF_FINAL;
bool IsInitialized() const PROTOBUF_FINAL;
size_t ByteSizeLong() const PROTOBUF_FINAL;
bool MergePartialFromCodedStream(
::google::protobuf::io::CodedInputStream* input) PROTOBUF_FINAL;
void SerializeWithCachedSizes(
::google::protobuf::io::CodedOutputStream* output) const PROTOBUF_FINAL;
void DiscardUnknownFields();
int GetCachedSize() const PROTOBUF_FINAL { return _cached_size_; }
private:
void SharedCtor();
void SharedDtor();
void SetCachedSize(int size) const;
void InternalSwap(ProgramDesc* other);
private:
inline ::google::protobuf::Arena* GetArenaNoVirtual() const { return NULL; }
inline void* MaybeArenaPtr() const { return NULL; }
public:
::std::string GetTypeName() const PROTOBUF_FINAL;
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .paddle_mobile.framework.proto.BlockDesc blocks = 1;
int blocks_size() const;
void clear_blocks();
static const int kBlocksFieldNumber = 1;
const ::paddle_mobile::framework::proto::BlockDesc& blocks(int index) const;
::paddle_mobile::framework::proto::BlockDesc* mutable_blocks(int index);
::paddle_mobile::framework::proto::BlockDesc* add_blocks();
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::BlockDesc>*
mutable_blocks();
const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::BlockDesc>&
blocks() const;
// @@protoc_insertion_point(class_scope:paddle_mobile.framework.proto.ProgramDesc)
private:
::google::protobuf::internal::InternalMetadataWithArenaLite
_internal_metadata_;
::google::protobuf::internal::HasBits<1> _has_bits_;
mutable int _cached_size_;
::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::BlockDesc>
blocks_;
friend struct protobuf_framework_2eproto::TableStruct;
};
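// Editor's note: usage sketch, not protoc output. These classes derive
// from MessageLite, so reflection and TextFormat are unavailable, but the
// binary wire format works as usual: a serialized ProgramDesc can be
// parsed and walked with only the accessors declared above. The
// "__model__" file name is an assumption, not something this header
// defines:
//
//   paddle_mobile::framework::proto::ProgramDesc program;
//   std::string bytes;  // contents of a serialized ProgramDesc,
//                       // e.g. a "__model__" file
//   if (program.ParseFromString(bytes)) {
//     for (int b = 0; b < program.blocks_size(); ++b) {
//       const paddle_mobile::framework::proto::BlockDesc& blk =
//           program.blocks(b);
//       (void)blk;
//     }
//   }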
// ===================================================================
// ===================================================================
#if !PROTOBUF_INLINE_NOT_IN_HEADERS
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif // __GNUC__
// OpDesc_Attr
// required string name = 1;
inline bool OpDesc_Attr::has_name() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpDesc_Attr::set_has_name() { _has_bits_[0] |= 0x00000001u; }
inline void OpDesc_Attr::clear_has_name() { _has_bits_[0] &= ~0x00000001u; }
inline void OpDesc_Attr::clear_name() {
name_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_name();
}
inline const ::std::string& OpDesc_Attr::name() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.name)
return name_.GetNoArena();
}
inline void OpDesc_Attr::set_name(const ::std::string& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.name)
}
#if LANG_CXX11
inline void OpDesc_Attr::set_name(::std::string&& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpDesc.Attr.name)
}
#endif
inline void OpDesc_Attr::set_name(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.Attr.name)
}
inline void OpDesc_Attr::set_name(const char* value, size_t size) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.Attr.name)
}
inline ::std::string* OpDesc_Attr::mutable_name() {
set_has_name();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.Attr.name)
return name_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpDesc_Attr::release_name() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpDesc.Attr.name)
clear_has_name();
return name_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpDesc_Attr::set_allocated_name(::std::string* name) {
if (name != NULL) {
set_has_name();
} else {
clear_has_name();
}
name_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpDesc.Attr.name)
}
// required .paddle_mobile.framework.proto.AttrType type = 2;
inline bool OpDesc_Attr::has_type() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void OpDesc_Attr::set_has_type() { _has_bits_[0] |= 0x00000004u; }
inline void OpDesc_Attr::clear_has_type() { _has_bits_[0] &= ~0x00000004u; }
inline void OpDesc_Attr::clear_type() {
type_ = 0;
clear_has_type();
}
inline ::paddle_mobile::framework::proto::AttrType OpDesc_Attr::type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.type)
return static_cast<::paddle_mobile::framework::proto::AttrType>(type_);
}
inline void OpDesc_Attr::set_type(
::paddle_mobile::framework::proto::AttrType value) {
assert(::paddle_mobile::framework::proto::AttrType_IsValid(value));
set_has_type();
type_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.type)
}
// optional int32 i = 3;
inline bool OpDesc_Attr::has_i() const {
return (_has_bits_[0] & 0x00000008u) != 0;
}
inline void OpDesc_Attr::set_has_i() { _has_bits_[0] |= 0x00000008u; }
inline void OpDesc_Attr::clear_has_i() { _has_bits_[0] &= ~0x00000008u; }
inline void OpDesc_Attr::clear_i() {
i_ = 0;
clear_has_i();
}
inline ::google::protobuf::int32 OpDesc_Attr::i() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.i)
return i_;
}
inline void OpDesc_Attr::set_i(::google::protobuf::int32 value) {
set_has_i();
i_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.i)
}
// optional float f = 4;
inline bool OpDesc_Attr::has_f() const {
return (_has_bits_[0] & 0x00000010u) != 0;
}
inline void OpDesc_Attr::set_has_f() { _has_bits_[0] |= 0x00000010u; }
inline void OpDesc_Attr::clear_has_f() { _has_bits_[0] &= ~0x00000010u; }
inline void OpDesc_Attr::clear_f() {
f_ = 0;
clear_has_f();
}
inline float OpDesc_Attr::f() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.f)
return f_;
}
inline void OpDesc_Attr::set_f(float value) {
set_has_f();
f_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.f)
}
// optional string s = 5;
inline bool OpDesc_Attr::has_s() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void OpDesc_Attr::set_has_s() { _has_bits_[0] |= 0x00000002u; }
inline void OpDesc_Attr::clear_has_s() { _has_bits_[0] &= ~0x00000002u; }
inline void OpDesc_Attr::clear_s() {
s_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_s();
}
inline const ::std::string& OpDesc_Attr::s() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.s)
return s_.GetNoArena();
}
inline void OpDesc_Attr::set_s(const ::std::string& value) {
set_has_s();
s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.s)
}
#if LANG_CXX11
inline void OpDesc_Attr::set_s(::std::string&& value) {
set_has_s();
s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpDesc.Attr.s)
}
#endif
inline void OpDesc_Attr::set_s(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_s();
s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.Attr.s)
}
inline void OpDesc_Attr::set_s(const char* value, size_t size) {
set_has_s();
s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.Attr.s)
}
inline ::std::string* OpDesc_Attr::mutable_s() {
set_has_s();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.Attr.s)
return s_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpDesc_Attr::release_s() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpDesc.Attr.s)
clear_has_s();
return s_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpDesc_Attr::set_allocated_s(::std::string* s) {
if (s != NULL) {
set_has_s();
} else {
clear_has_s();
}
s_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), s);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpDesc.Attr.s)
}
// repeated int32 ints = 6;
inline int OpDesc_Attr::ints_size() const { return ints_.size(); }
inline void OpDesc_Attr::clear_ints() { ints_.Clear(); }
inline ::google::protobuf::int32 OpDesc_Attr::ints(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.ints)
return ints_.Get(index);
}
inline void OpDesc_Attr::set_ints(int index, ::google::protobuf::int32 value) {
ints_.Set(index, value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.ints)
}
inline void OpDesc_Attr::add_ints(::google::protobuf::int32 value) {
ints_.Add(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Attr.ints)
}
inline const ::google::protobuf::RepeatedField<::google::protobuf::int32>&
OpDesc_Attr::ints() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.Attr.ints)
return ints_;
}
inline ::google::protobuf::RepeatedField<::google::protobuf::int32>*
OpDesc_Attr::mutable_ints() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.Attr.ints)
return &ints_;
}
// repeated float floats = 7;
inline int OpDesc_Attr::floats_size() const { return floats_.size(); }
inline void OpDesc_Attr::clear_floats() { floats_.Clear(); }
inline float OpDesc_Attr::floats(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.floats)
return floats_.Get(index);
}
inline void OpDesc_Attr::set_floats(int index, float value) {
floats_.Set(index, value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.floats)
}
inline void OpDesc_Attr::add_floats(float value) {
floats_.Add(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Attr.floats)
}
inline const ::google::protobuf::RepeatedField<float>& OpDesc_Attr::floats()
const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.Attr.floats)
return floats_;
}
inline ::google::protobuf::RepeatedField<float>* OpDesc_Attr::mutable_floats() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.Attr.floats)
return &floats_;
}
// repeated string strings = 8;
inline int OpDesc_Attr::strings_size() const { return strings_.size(); }
inline void OpDesc_Attr::clear_strings() { strings_.Clear(); }
inline const ::std::string& OpDesc_Attr::strings(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.strings)
return strings_.Get(index);
}
inline ::std::string* OpDesc_Attr::mutable_strings(int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.Attr.strings)
return strings_.Mutable(index);
}
inline void OpDesc_Attr::set_strings(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.strings)
strings_.Mutable(index)->assign(value);
}
#if LANG_CXX11
inline void OpDesc_Attr::set_strings(int index, ::std::string&& value) {
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.strings)
strings_.Mutable(index)->assign(std::move(value));
}
#endif
inline void OpDesc_Attr::set_strings(int index, const char* value) {
GOOGLE_DCHECK(value != NULL);
strings_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
inline void OpDesc_Attr::set_strings(int index, const char* value,
size_t size) {
strings_.Mutable(index)->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
inline ::std::string* OpDesc_Attr::add_strings() {
// @@protoc_insertion_point(field_add_mutable:paddle_mobile.framework.proto.OpDesc.Attr.strings)
return strings_.Add();
}
inline void OpDesc_Attr::add_strings(const ::std::string& value) {
strings_.Add()->assign(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
#if LANG_CXX11
inline void OpDesc_Attr::add_strings(::std::string&& value) {
strings_.Add(std::move(value));
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
#endif
inline void OpDesc_Attr::add_strings(const char* value) {
GOOGLE_DCHECK(value != NULL);
strings_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
inline void OpDesc_Attr::add_strings(const char* value, size_t size) {
strings_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:paddle_mobile.framework.proto.OpDesc.Attr.strings)
}
inline const ::google::protobuf::RepeatedPtrField<::std::string>&
OpDesc_Attr::strings() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.Attr.strings)
return strings_;
}
inline ::google::protobuf::RepeatedPtrField<::std::string>*
OpDesc_Attr::mutable_strings() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.Attr.strings)
return &strings_;
}
// optional bool b = 10;
inline bool OpDesc_Attr::has_b() const {
return (_has_bits_[0] & 0x00000020u) != 0;
}
inline void OpDesc_Attr::set_has_b() { _has_bits_[0] |= 0x00000020u; }
inline void OpDesc_Attr::clear_has_b() { _has_bits_[0] &= ~0x00000020u; }
inline void OpDesc_Attr::clear_b() {
b_ = false;
clear_has_b();
}
inline bool OpDesc_Attr::b() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.b)
return b_;
}
inline void OpDesc_Attr::set_b(bool value) {
set_has_b();
b_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.b)
}
// repeated bool bools = 11;
inline int OpDesc_Attr::bools_size() const { return bools_.size(); }
inline void OpDesc_Attr::clear_bools() { bools_.Clear(); }
inline bool OpDesc_Attr::bools(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.bools)
return bools_.Get(index);
}
inline void OpDesc_Attr::set_bools(int index, bool value) {
bools_.Set(index, value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.bools)
}
inline void OpDesc_Attr::add_bools(bool value) {
bools_.Add(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Attr.bools)
}
inline const ::google::protobuf::RepeatedField<bool>& OpDesc_Attr::bools()
const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.Attr.bools)
return bools_;
}
inline ::google::protobuf::RepeatedField<bool>* OpDesc_Attr::mutable_bools() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.Attr.bools)
return &bools_;
}
// optional int32 block_idx = 12;
inline bool OpDesc_Attr::has_block_idx() const {
return (_has_bits_[0] & 0x00000080u) != 0;
}
inline void OpDesc_Attr::set_has_block_idx() { _has_bits_[0] |= 0x00000080u; }
inline void OpDesc_Attr::clear_has_block_idx() {
_has_bits_[0] &= ~0x00000080u;
}
inline void OpDesc_Attr::clear_block_idx() {
block_idx_ = 0;
clear_has_block_idx();
}
inline ::google::protobuf::int32 OpDesc_Attr::block_idx() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.block_idx)
return block_idx_;
}
inline void OpDesc_Attr::set_block_idx(::google::protobuf::int32 value) {
set_has_block_idx();
block_idx_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.block_idx)
}
// optional int64 l = 13;
inline bool OpDesc_Attr::has_l() const {
return (_has_bits_[0] & 0x00000040u) != 0;
}
inline void OpDesc_Attr::set_has_l() { _has_bits_[0] |= 0x00000040u; }
inline void OpDesc_Attr::clear_has_l() { _has_bits_[0] &= ~0x00000040u; }
inline void OpDesc_Attr::clear_l() {
l_ = GOOGLE_LONGLONG(0);
clear_has_l();
}
inline ::google::protobuf::int64 OpDesc_Attr::l() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Attr.l)
return l_;
}
inline void OpDesc_Attr::set_l(::google::protobuf::int64 value) {
set_has_l();
l_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Attr.l)
}
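// Editor's note: usage sketch, not protoc output. OpDesc_Attr behaves like
// a tagged union: attr.type() carries the authoritative AttrType tag, while
// the has-bits and *_size() accessors above offer a tag-free way to see
// which value field is populated:
//
//   // const paddle_mobile::framework::proto::OpDesc_Attr& attr = ...;
//   if (attr.has_i())      { ::google::protobuf::int32 v = attr.i(); (void)v; }
//   else if (attr.has_f()) { float v = attr.f(); (void)v; }
//   else if (attr.has_s()) { const ::std::string& v = attr.s(); (void)v; }
//   else if (attr.has_b()) { bool v = attr.b(); (void)v; }
//   else if (attr.ints_size() > 0)   { /* attr.ints(0), ... */ }
//   else if (attr.floats_size() > 0) { /* attr.floats(0), ... */ }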
// -------------------------------------------------------------------
// OpDesc_Var
// required string parameter = 1;
inline bool OpDesc_Var::has_parameter() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpDesc_Var::set_has_parameter() { _has_bits_[0] |= 0x00000001u; }
inline void OpDesc_Var::clear_has_parameter() { _has_bits_[0] &= ~0x00000001u; }
inline void OpDesc_Var::clear_parameter() {
parameter_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_parameter();
}
inline const ::std::string& OpDesc_Var::parameter() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Var.parameter)
return parameter_.GetNoArena();
}
inline void OpDesc_Var::set_parameter(const ::std::string& value) {
set_has_parameter();
parameter_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Var.parameter)
}
#if LANG_CXX11
inline void OpDesc_Var::set_parameter(::std::string&& value) {
set_has_parameter();
parameter_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpDesc.Var.parameter)
}
#endif
inline void OpDesc_Var::set_parameter(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_parameter();
parameter_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.Var.parameter)
}
inline void OpDesc_Var::set_parameter(const char* value, size_t size) {
set_has_parameter();
parameter_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.Var.parameter)
}
inline ::std::string* OpDesc_Var::mutable_parameter() {
set_has_parameter();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.Var.parameter)
return parameter_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpDesc_Var::release_parameter() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpDesc.Var.parameter)
clear_has_parameter();
return parameter_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpDesc_Var::set_allocated_parameter(::std::string* parameter) {
if (parameter != NULL) {
set_has_parameter();
} else {
clear_has_parameter();
}
parameter_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), parameter);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpDesc.Var.parameter)
}
// repeated string arguments = 2;
inline int OpDesc_Var::arguments_size() const { return arguments_.size(); }
inline void OpDesc_Var::clear_arguments() { arguments_.Clear(); }
inline const ::std::string& OpDesc_Var::arguments(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.Var.arguments)
return arguments_.Get(index);
}
inline ::std::string* OpDesc_Var::mutable_arguments(int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.Var.arguments)
return arguments_.Mutable(index);
}
inline void OpDesc_Var::set_arguments(int index, const ::std::string& value) {
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Var.arguments)
arguments_.Mutable(index)->assign(value);
}
#if LANG_CXX11
inline void OpDesc_Var::set_arguments(int index, ::std::string&& value) {
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.Var.arguments)
arguments_.Mutable(index)->assign(std::move(value));
}
#endif
inline void OpDesc_Var::set_arguments(int index, const char* value) {
GOOGLE_DCHECK(value != NULL);
arguments_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
inline void OpDesc_Var::set_arguments(int index, const char* value,
size_t size) {
arguments_.Mutable(index)->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
inline ::std::string* OpDesc_Var::add_arguments() {
// @@protoc_insertion_point(field_add_mutable:paddle_mobile.framework.proto.OpDesc.Var.arguments)
return arguments_.Add();
}
inline void OpDesc_Var::add_arguments(const ::std::string& value) {
arguments_.Add()->assign(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
#if LANG_CXX11
inline void OpDesc_Var::add_arguments(::std::string&& value) {
arguments_.Add(std::move(value));
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
#endif
inline void OpDesc_Var::add_arguments(const char* value) {
GOOGLE_DCHECK(value != NULL);
arguments_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
inline void OpDesc_Var::add_arguments(const char* value, size_t size) {
arguments_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:paddle_mobile.framework.proto.OpDesc.Var.arguments)
}
inline const ::google::protobuf::RepeatedPtrField<::std::string>&
OpDesc_Var::arguments() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.Var.arguments)
return arguments_;
}
inline ::google::protobuf::RepeatedPtrField<::std::string>*
OpDesc_Var::mutable_arguments() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.Var.arguments)
return &arguments_;
}
// -------------------------------------------------------------------
// OpDesc
// required string type = 3;
inline bool OpDesc::has_type() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpDesc::set_has_type() { _has_bits_[0] |= 0x00000001u; }
inline void OpDesc::clear_has_type() { _has_bits_[0] &= ~0x00000001u; }
inline void OpDesc::clear_type() {
type_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_type();
}
inline const ::std::string& OpDesc::type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.type)
return type_.GetNoArena();
}
inline void OpDesc::set_type(const ::std::string& value) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.type)
}
#if LANG_CXX11
inline void OpDesc::set_type(::std::string&& value) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpDesc.type)
}
#endif
inline void OpDesc::set_type(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpDesc.type)
}
inline void OpDesc::set_type(const char* value, size_t size) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpDesc.type)
}
inline ::std::string* OpDesc::mutable_type() {
set_has_type();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.type)
return type_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpDesc::release_type() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpDesc.type)
clear_has_type();
return type_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpDesc::set_allocated_type(::std::string* type) {
if (type != NULL) {
set_has_type();
} else {
clear_has_type();
}
type_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpDesc.type)
}
// repeated .paddle_mobile.framework.proto.OpDesc.Var inputs = 1;
inline int OpDesc::inputs_size() const { return inputs_.size(); }
inline void OpDesc::clear_inputs() { inputs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpDesc_Var& OpDesc::inputs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.inputs)
return inputs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Var* OpDesc::mutable_inputs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.inputs)
return inputs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Var* OpDesc::add_inputs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.inputs)
return inputs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>*
OpDesc::mutable_inputs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.inputs)
return &inputs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>&
OpDesc::inputs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.inputs)
return inputs_;
}
// repeated .paddle_mobile.framework.proto.OpDesc.Var outputs = 2;
inline int OpDesc::outputs_size() const { return outputs_.size(); }
inline void OpDesc::clear_outputs() { outputs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpDesc_Var& OpDesc::outputs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.outputs)
return outputs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Var* OpDesc::mutable_outputs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.outputs)
return outputs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Var* OpDesc::add_outputs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.outputs)
return outputs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>*
OpDesc::mutable_outputs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.outputs)
return &outputs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Var>&
OpDesc::outputs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.outputs)
return outputs_;
}
// repeated .paddle_mobile.framework.proto.OpDesc.Attr attrs = 4;
inline int OpDesc::attrs_size() const { return attrs_.size(); }
inline void OpDesc::clear_attrs() { attrs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpDesc_Attr& OpDesc::attrs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.attrs)
return attrs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Attr* OpDesc::mutable_attrs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpDesc.attrs)
return attrs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpDesc_Attr* OpDesc::add_attrs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpDesc.attrs)
return attrs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Attr>*
OpDesc::mutable_attrs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpDesc.attrs)
return &attrs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc_Attr>&
OpDesc::attrs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpDesc.attrs)
return attrs_;
}
// optional bool is_target = 5 [default = false];
inline bool OpDesc::has_is_target() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void OpDesc::set_has_is_target() { _has_bits_[0] |= 0x00000002u; }
inline void OpDesc::clear_has_is_target() { _has_bits_[0] &= ~0x00000002u; }
inline void OpDesc::clear_is_target() {
is_target_ = false;
clear_has_is_target();
}
inline bool OpDesc::is_target() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpDesc.is_target)
return is_target_;
}
inline void OpDesc::set_is_target(bool value) {
set_has_is_target();
is_target_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpDesc.is_target)
}
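// Editor's note: usage sketch, not protoc output. Building an op record
// with the accessors above; the op/attr names are illustrative, and the
// BOOLEAN constant is assumed from the AttrType enum declared earlier in
// this header:
//
//   paddle_mobile::framework::proto::OpDesc op;
//   op.set_type("conv2d");
//   paddle_mobile::framework::proto::OpDesc_Var* in = op.add_inputs();
//   in->set_parameter("Input");
//   in->add_arguments("pixel");
//   paddle_mobile::framework::proto::OpDesc_Attr* a = op.add_attrs();
//   a->set_name("use_cudnn");
//   a->set_type(paddle_mobile::framework::proto::BOOLEAN);  // assumed name
//   a->set_b(false);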
// -------------------------------------------------------------------
// OpProto_Var
// required string name = 1;
inline bool OpProto_Var::has_name() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpProto_Var::set_has_name() { _has_bits_[0] |= 0x00000001u; }
inline void OpProto_Var::clear_has_name() { _has_bits_[0] &= ~0x00000001u; }
inline void OpProto_Var::clear_name() {
name_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_name();
}
inline const ::std::string& OpProto_Var::name() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Var.name)
return name_.GetNoArena();
}
inline void OpProto_Var::set_name(const ::std::string& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Var.name)
}
#if LANG_CXX11
inline void OpProto_Var::set_name(::std::string&& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.Var.name)
}
#endif
inline void OpProto_Var::set_name(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.Var.name)
}
inline void OpProto_Var::set_name(const char* value, size_t size) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.Var.name)
}
inline ::std::string* OpProto_Var::mutable_name() {
set_has_name();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.Var.name)
return name_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto_Var::release_name() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.Var.name)
clear_has_name();
return name_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto_Var::set_allocated_name(::std::string* name) {
if (name != NULL) {
set_has_name();
} else {
clear_has_name();
}
name_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.Var.name)
}
// required string comment = 2;
inline bool OpProto_Var::has_comment() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void OpProto_Var::set_has_comment() { _has_bits_[0] |= 0x00000002u; }
inline void OpProto_Var::clear_has_comment() { _has_bits_[0] &= ~0x00000002u; }
inline void OpProto_Var::clear_comment() {
comment_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_comment();
}
inline const ::std::string& OpProto_Var::comment() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Var.comment)
return comment_.GetNoArena();
}
inline void OpProto_Var::set_comment(const ::std::string& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Var.comment)
}
#if LANG_CXX11
inline void OpProto_Var::set_comment(::std::string&& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.Var.comment)
}
#endif
inline void OpProto_Var::set_comment(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.Var.comment)
}
inline void OpProto_Var::set_comment(const char* value, size_t size) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.Var.comment)
}
inline ::std::string* OpProto_Var::mutable_comment() {
set_has_comment();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.Var.comment)
return comment_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto_Var::release_comment() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.Var.comment)
clear_has_comment();
return comment_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto_Var::set_allocated_comment(::std::string* comment) {
if (comment != NULL) {
set_has_comment();
} else {
clear_has_comment();
}
comment_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.Var.comment)
}
// optional bool duplicable = 3 [default = false];
inline bool OpProto_Var::has_duplicable() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void OpProto_Var::set_has_duplicable() { _has_bits_[0] |= 0x00000004u; }
inline void OpProto_Var::clear_has_duplicable() {
_has_bits_[0] &= ~0x00000004u;
}
inline void OpProto_Var::clear_duplicable() {
duplicable_ = false;
clear_has_duplicable();
}
inline bool OpProto_Var::duplicable() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Var.duplicable)
return duplicable_;
}
inline void OpProto_Var::set_duplicable(bool value) {
set_has_duplicable();
duplicable_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Var.duplicable)
}
// optional bool intermediate = 4 [default = false];
inline bool OpProto_Var::has_intermediate() const {
return (_has_bits_[0] & 0x00000008u) != 0;
}
inline void OpProto_Var::set_has_intermediate() {
_has_bits_[0] |= 0x00000008u;
}
inline void OpProto_Var::clear_has_intermediate() {
_has_bits_[0] &= ~0x00000008u;
}
inline void OpProto_Var::clear_intermediate() {
intermediate_ = false;
clear_has_intermediate();
}
inline bool OpProto_Var::intermediate() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Var.intermediate)
return intermediate_;
}
inline void OpProto_Var::set_intermediate(bool value) {
set_has_intermediate();
intermediate_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Var.intermediate)
}
// optional bool dispensable = 5 [default = false];
inline bool OpProto_Var::has_dispensable() const {
return (_has_bits_[0] & 0x00000010u) != 0;
}
inline void OpProto_Var::set_has_dispensable() { _has_bits_[0] |= 0x00000010u; }
inline void OpProto_Var::clear_has_dispensable() {
_has_bits_[0] &= ~0x00000010u;
}
inline void OpProto_Var::clear_dispensable() {
dispensable_ = false;
clear_has_dispensable();
}
inline bool OpProto_Var::dispensable() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Var.dispensable)
return dispensable_;
}
inline void OpProto_Var::set_dispensable(bool value) {
set_has_dispensable();
dispensable_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Var.dispensable)
}
// -------------------------------------------------------------------
// OpProto_Attr
// required string name = 1;
inline bool OpProto_Attr::has_name() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpProto_Attr::set_has_name() { _has_bits_[0] |= 0x00000001u; }
inline void OpProto_Attr::clear_has_name() { _has_bits_[0] &= ~0x00000001u; }
inline void OpProto_Attr::clear_name() {
name_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_name();
}
inline const ::std::string& OpProto_Attr::name() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Attr.name)
return name_.GetNoArena();
}
inline void OpProto_Attr::set_name(const ::std::string& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Attr.name)
}
#if LANG_CXX11
inline void OpProto_Attr::set_name(::std::string&& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.Attr.name)
}
#endif
inline void OpProto_Attr::set_name(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.Attr.name)
}
inline void OpProto_Attr::set_name(const char* value, size_t size) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.Attr.name)
}
inline ::std::string* OpProto_Attr::mutable_name() {
set_has_name();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.Attr.name)
return name_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto_Attr::release_name() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.Attr.name)
clear_has_name();
return name_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto_Attr::set_allocated_name(::std::string* name) {
if (name != NULL) {
set_has_name();
} else {
clear_has_name();
}
name_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.Attr.name)
}
// required .paddle_mobile.framework.proto.AttrType type = 2;
inline bool OpProto_Attr::has_type() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void OpProto_Attr::set_has_type() { _has_bits_[0] |= 0x00000004u; }
inline void OpProto_Attr::clear_has_type() { _has_bits_[0] &= ~0x00000004u; }
inline void OpProto_Attr::clear_type() {
type_ = 0;
clear_has_type();
}
inline ::paddle_mobile::framework::proto::AttrType OpProto_Attr::type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Attr.type)
return static_cast<::paddle_mobile::framework::proto::AttrType>(type_);
}
inline void OpProto_Attr::set_type(
::paddle_mobile::framework::proto::AttrType value) {
assert(::paddle_mobile::framework::proto::AttrType_IsValid(value));
set_has_type();
type_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Attr.type)
}
// required string comment = 3;
inline bool OpProto_Attr::has_comment() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void OpProto_Attr::set_has_comment() { _has_bits_[0] |= 0x00000002u; }
inline void OpProto_Attr::clear_has_comment() { _has_bits_[0] &= ~0x00000002u; }
inline void OpProto_Attr::clear_comment() {
comment_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_comment();
}
inline const ::std::string& OpProto_Attr::comment() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Attr.comment)
return comment_.GetNoArena();
}
inline void OpProto_Attr::set_comment(const ::std::string& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Attr.comment)
}
#if LANG_CXX11
inline void OpProto_Attr::set_comment(::std::string&& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.Attr.comment)
}
#endif
inline void OpProto_Attr::set_comment(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.Attr.comment)
}
inline void OpProto_Attr::set_comment(const char* value, size_t size) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.Attr.comment)
}
inline ::std::string* OpProto_Attr::mutable_comment() {
set_has_comment();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.Attr.comment)
return comment_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto_Attr::release_comment() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.Attr.comment)
clear_has_comment();
return comment_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto_Attr::set_allocated_comment(::std::string* comment) {
if (comment != NULL) {
set_has_comment();
} else {
clear_has_comment();
}
comment_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.Attr.comment)
}
// optional bool generated = 4 [default = false];
inline bool OpProto_Attr::has_generated() const {
return (_has_bits_[0] & 0x00000008u) != 0;
}
inline void OpProto_Attr::set_has_generated() { _has_bits_[0] |= 0x00000008u; }
inline void OpProto_Attr::clear_has_generated() {
_has_bits_[0] &= ~0x00000008u;
}
inline void OpProto_Attr::clear_generated() {
generated_ = false;
clear_has_generated();
}
inline bool OpProto_Attr::generated() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.Attr.generated)
return generated_;
}
inline void OpProto_Attr::set_generated(bool value) {
set_has_generated();
generated_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.Attr.generated)
}
// -------------------------------------------------------------------
// OpProto
// required string type = 1;
inline bool OpProto::has_type() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void OpProto::set_has_type() { _has_bits_[0] |= 0x00000001u; }
inline void OpProto::clear_has_type() { _has_bits_[0] &= ~0x00000001u; }
inline void OpProto::clear_type() {
type_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_type();
}
inline const ::std::string& OpProto::type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.type)
return type_.GetNoArena();
}
inline void OpProto::set_type(const ::std::string& value) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.type)
}
#if LANG_CXX11
inline void OpProto::set_type(::std::string&& value) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.type)
}
#endif
inline void OpProto::set_type(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.type)
}
inline void OpProto::set_type(const char* value, size_t size) {
set_has_type();
type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.type)
}
inline ::std::string* OpProto::mutable_type() {
set_has_type();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.type)
return type_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto::release_type() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.type)
clear_has_type();
return type_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto::set_allocated_type(::std::string* type) {
if (type != NULL) {
set_has_type();
} else {
clear_has_type();
}
type_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.type)
}
// repeated .paddle_mobile.framework.proto.OpProto.Var inputs = 2;
inline int OpProto::inputs_size() const { return inputs_.size(); }
inline void OpProto::clear_inputs() { inputs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpProto_Var& OpProto::inputs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.inputs)
return inputs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Var* OpProto::mutable_inputs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.inputs)
return inputs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Var* OpProto::add_inputs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpProto.inputs)
return inputs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>*
OpProto::mutable_inputs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpProto.inputs)
return &inputs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>&
OpProto::inputs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpProto.inputs)
return inputs_;
}
// repeated .paddle_mobile.framework.proto.OpProto.Var outputs = 3;
inline int OpProto::outputs_size() const { return outputs_.size(); }
inline void OpProto::clear_outputs() { outputs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpProto_Var& OpProto::outputs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.outputs)
return outputs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Var* OpProto::mutable_outputs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.outputs)
return outputs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Var* OpProto::add_outputs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpProto.outputs)
return outputs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>*
OpProto::mutable_outputs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpProto.outputs)
return &outputs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Var>&
OpProto::outputs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpProto.outputs)
return outputs_;
}
// repeated .paddle_mobile.framework.proto.OpProto.Attr attrs = 4;
inline int OpProto::attrs_size() const { return attrs_.size(); }
inline void OpProto::clear_attrs() { attrs_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpProto_Attr& OpProto::attrs(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.attrs)
return attrs_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Attr* OpProto::mutable_attrs(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.attrs)
return attrs_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpProto_Attr* OpProto::add_attrs() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.OpProto.attrs)
return attrs_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Attr>*
OpProto::mutable_attrs() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.OpProto.attrs)
return &attrs_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpProto_Attr>&
OpProto::attrs() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.OpProto.attrs)
return attrs_;
}
// required string comment = 5;
inline bool OpProto::has_comment() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void OpProto::set_has_comment() { _has_bits_[0] |= 0x00000002u; }
inline void OpProto::clear_has_comment() { _has_bits_[0] &= ~0x00000002u; }
inline void OpProto::clear_comment() {
comment_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_comment();
}
inline const ::std::string& OpProto::comment() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.OpProto.comment)
return comment_.GetNoArena();
}
inline void OpProto::set_comment(const ::std::string& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.OpProto.comment)
}
#if LANG_CXX11
inline void OpProto::set_comment(::std::string&& value) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.OpProto.comment)
}
#endif
inline void OpProto::set_comment(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.OpProto.comment)
}
inline void OpProto::set_comment(const char* value, size_t size) {
set_has_comment();
comment_.SetNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.OpProto.comment)
}
inline ::std::string* OpProto::mutable_comment() {
set_has_comment();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.OpProto.comment)
return comment_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* OpProto::release_comment() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.OpProto.comment)
clear_has_comment();
return comment_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void OpProto::set_allocated_comment(::std::string* comment) {
if (comment != NULL) {
set_has_comment();
} else {
clear_has_comment();
}
comment_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.OpProto.comment)
}
// -------------------------------------------------------------------
// VarType_TensorDesc
// required .paddle_mobile.framework.proto.VarType.Type data_type = 1;
inline bool VarType_TensorDesc::has_data_type() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarType_TensorDesc::set_has_data_type() {
_has_bits_[0] |= 0x00000001u;
}
inline void VarType_TensorDesc::clear_has_data_type() {
_has_bits_[0] &= ~0x00000001u;
}
inline void VarType_TensorDesc::clear_data_type() {
data_type_ = 0;
clear_has_data_type();
}
inline ::paddle_mobile::framework::proto::VarType_Type
VarType_TensorDesc::data_type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.TensorDesc.data_type)
return static_cast<::paddle_mobile::framework::proto::VarType_Type>(
data_type_);
}
inline void VarType_TensorDesc::set_data_type(
::paddle_mobile::framework::proto::VarType_Type value) {
assert(::paddle_mobile::framework::proto::VarType_Type_IsValid(value));
set_has_data_type();
data_type_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.TensorDesc.data_type)
}
// repeated int64 dims = 2;
inline int VarType_TensorDesc::dims_size() const { return dims_.size(); }
inline void VarType_TensorDesc::clear_dims() { dims_.Clear(); }
inline ::google::protobuf::int64 VarType_TensorDesc::dims(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.TensorDesc.dims)
return dims_.Get(index);
}
inline void VarType_TensorDesc::set_dims(int index,
::google::protobuf::int64 value) {
dims_.Set(index, value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.TensorDesc.dims)
}
inline void VarType_TensorDesc::add_dims(::google::protobuf::int64 value) {
dims_.Add(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.VarType.TensorDesc.dims)
}
inline const ::google::protobuf::RepeatedField<::google::protobuf::int64>&
VarType_TensorDesc::dims() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.VarType.TensorDesc.dims)
return dims_;
}
inline ::google::protobuf::RepeatedField<::google::protobuf::int64>*
VarType_TensorDesc::mutable_dims() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.VarType.TensorDesc.dims)
return &dims_;
}
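// Usage sketch (illustrative, not part of the generated file): together these
// accessors mirror `repeated int64 dims` from framework.proto, where an
// unknown dimension is saved as -1:
//   VarType_TensorDesc desc;
//   desc.set_data_type(VarType_Type_FP32);  // enum constant per protoc naming
//   desc.add_dims(-1);   // [UNK, 640, 480] is saved as [-1, 640, 480]
//   desc.add_dims(640);
//   desc.add_dims(480);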
// -------------------------------------------------------------------
// VarType_LoDTensorDesc
// required .paddle_mobile.framework.proto.VarType.TensorDesc tensor = 1;
inline bool VarType_LoDTensorDesc::has_tensor() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarType_LoDTensorDesc::set_has_tensor() {
_has_bits_[0] |= 0x00000001u;
}
inline void VarType_LoDTensorDesc::clear_has_tensor() {
_has_bits_[0] &= ~0x00000001u;
}
inline void VarType_LoDTensorDesc::clear_tensor() {
if (tensor_ != NULL)
tensor_->::paddle_mobile::framework::proto::VarType_TensorDesc::Clear();
clear_has_tensor();
}
inline const ::paddle_mobile::framework::proto::VarType_TensorDesc&
VarType_LoDTensorDesc::tensor() const {
const ::paddle_mobile::framework::proto::VarType_TensorDesc* p = tensor_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.LoDTensorDesc.tensor)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_TensorDesc*>(
&::paddle_mobile::framework::proto::
_VarType_TensorDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType_LoDTensorDesc::mutable_tensor() {
set_has_tensor();
if (tensor_ == NULL) {
tensor_ = new ::paddle_mobile::framework::proto::VarType_TensorDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.LoDTensorDesc.tensor)
return tensor_;
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType_LoDTensorDesc::release_tensor() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.LoDTensorDesc.tensor)
clear_has_tensor();
::paddle_mobile::framework::proto::VarType_TensorDesc* temp = tensor_;
tensor_ = NULL;
return temp;
}
inline void VarType_LoDTensorDesc::set_allocated_tensor(
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor) {
delete tensor_;
tensor_ = tensor;
if (tensor) {
set_has_tensor();
} else {
clear_has_tensor();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.LoDTensorDesc.tensor)
}
// optional int32 lod_level = 2 [default = 0];
inline bool VarType_LoDTensorDesc::has_lod_level() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void VarType_LoDTensorDesc::set_has_lod_level() {
_has_bits_[0] |= 0x00000002u;
}
inline void VarType_LoDTensorDesc::clear_has_lod_level() {
_has_bits_[0] &= ~0x00000002u;
}
inline void VarType_LoDTensorDesc::clear_lod_level() {
lod_level_ = 0;
clear_has_lod_level();
}
inline ::google::protobuf::int32 VarType_LoDTensorDesc::lod_level() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.LoDTensorDesc.lod_level)
return lod_level_;
}
inline void VarType_LoDTensorDesc::set_lod_level(
::google::protobuf::int32 value) {
set_has_lod_level();
lod_level_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.LoDTensorDesc.lod_level)
}
// -------------------------------------------------------------------
// VarType_LoDTensorArrayDesc
// required .paddle_mobile.framework.proto.VarType.TensorDesc tensor = 1;
inline bool VarType_LoDTensorArrayDesc::has_tensor() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarType_LoDTensorArrayDesc::set_has_tensor() {
_has_bits_[0] |= 0x00000001u;
}
inline void VarType_LoDTensorArrayDesc::clear_has_tensor() {
_has_bits_[0] &= ~0x00000001u;
}
inline void VarType_LoDTensorArrayDesc::clear_tensor() {
if (tensor_ != NULL)
tensor_->::paddle_mobile::framework::proto::VarType_TensorDesc::Clear();
clear_has_tensor();
}
inline const ::paddle_mobile::framework::proto::VarType_TensorDesc&
VarType_LoDTensorArrayDesc::tensor() const {
const ::paddle_mobile::framework::proto::VarType_TensorDesc* p = tensor_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.tensor)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_TensorDesc*>(
&::paddle_mobile::framework::proto::
_VarType_TensorDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType_LoDTensorArrayDesc::mutable_tensor() {
set_has_tensor();
if (tensor_ == NULL) {
tensor_ = new ::paddle_mobile::framework::proto::VarType_TensorDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.tensor)
return tensor_;
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType_LoDTensorArrayDesc::release_tensor() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.tensor)
clear_has_tensor();
::paddle_mobile::framework::proto::VarType_TensorDesc* temp = tensor_;
tensor_ = NULL;
return temp;
}
inline void VarType_LoDTensorArrayDesc::set_allocated_tensor(
::paddle_mobile::framework::proto::VarType_TensorDesc* tensor) {
delete tensor_;
tensor_ = tensor;
if (tensor) {
set_has_tensor();
} else {
clear_has_tensor();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.tensor)
}
// optional int32 lod_level = 2 [default = 0];
inline bool VarType_LoDTensorArrayDesc::has_lod_level() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void VarType_LoDTensorArrayDesc::set_has_lod_level() {
_has_bits_[0] |= 0x00000002u;
}
inline void VarType_LoDTensorArrayDesc::clear_has_lod_level() {
_has_bits_[0] &= ~0x00000002u;
}
inline void VarType_LoDTensorArrayDesc::clear_lod_level() {
lod_level_ = 0;
clear_has_lod_level();
}
inline ::google::protobuf::int32 VarType_LoDTensorArrayDesc::lod_level() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.lod_level)
return lod_level_;
}
inline void VarType_LoDTensorArrayDesc::set_lod_level(
::google::protobuf::int32 value) {
set_has_lod_level();
lod_level_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc.lod_level)
}
// -------------------------------------------------------------------
// VarType_ReaderDesc
// repeated .paddle_mobile.framework.proto.VarType.LoDTensorDesc lod_tensor = 1;
inline int VarType_ReaderDesc::lod_tensor_size() const {
return lod_tensor_.size();
}
inline void VarType_ReaderDesc::clear_lod_tensor() { lod_tensor_.Clear(); }
inline const ::paddle_mobile::framework::proto::VarType_LoDTensorDesc&
VarType_ReaderDesc::lod_tensor(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.ReaderDesc.lod_tensor)
return lod_tensor_.Get(index);
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
VarType_ReaderDesc::mutable_lod_tensor(int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.ReaderDesc.lod_tensor)
return lod_tensor_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
VarType_ReaderDesc::add_lod_tensor() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.VarType.ReaderDesc.lod_tensor)
return lod_tensor_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarType_LoDTensorDesc>*
VarType_ReaderDesc::mutable_lod_tensor() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.VarType.ReaderDesc.lod_tensor)
return &lod_tensor_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarType_LoDTensorDesc>&
VarType_ReaderDesc::lod_tensor() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.VarType.ReaderDesc.lod_tensor)
return lod_tensor_;
}
// -------------------------------------------------------------------
// VarType_ChannelDesc
// required .paddle_mobile.framework.proto.VarType.Type data_type = 1;
inline bool VarType_ChannelDesc::has_data_type() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void VarType_ChannelDesc::set_has_data_type() {
_has_bits_[0] |= 0x00000002u;
}
inline void VarType_ChannelDesc::clear_has_data_type() {
_has_bits_[0] &= ~0x00000002u;
}
inline void VarType_ChannelDesc::clear_data_type() {
data_type_ = 0;
clear_has_data_type();
}
inline ::paddle_mobile::framework::proto::VarType_Type
VarType_ChannelDesc::data_type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.ChannelDesc.data_type)
return static_cast<::paddle_mobile::framework::proto::VarType_Type>(
data_type_);
}
inline void VarType_ChannelDesc::set_data_type(
::paddle_mobile::framework::proto::VarType_Type value) {
assert(::paddle_mobile::framework::proto::VarType_Type_IsValid(value));
set_has_data_type();
data_type_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.ChannelDesc.data_type)
}
// required int64 capacity = 2;
inline bool VarType_ChannelDesc::has_capacity() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarType_ChannelDesc::set_has_capacity() {
_has_bits_[0] |= 0x00000001u;
}
inline void VarType_ChannelDesc::clear_has_capacity() {
_has_bits_[0] &= ~0x00000001u;
}
inline void VarType_ChannelDesc::clear_capacity() {
capacity_ = GOOGLE_LONGLONG(0);
clear_has_capacity();
}
inline ::google::protobuf::int64 VarType_ChannelDesc::capacity() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.ChannelDesc.capacity)
return capacity_;
}
inline void VarType_ChannelDesc::set_capacity(::google::protobuf::int64 value) {
set_has_capacity();
capacity_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.ChannelDesc.capacity)
}
// -------------------------------------------------------------------
// VarType_Tuple
// repeated .paddle_mobile.framework.proto.VarType.Type element_type = 1;
inline int VarType_Tuple::element_type_size() const {
return element_type_.size();
}
inline void VarType_Tuple::clear_element_type() { element_type_.Clear(); }
inline ::paddle_mobile::framework::proto::VarType_Type
VarType_Tuple::element_type(int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.Tuple.element_type)
return static_cast<::paddle_mobile::framework::proto::VarType_Type>(
element_type_.Get(index));
}
inline void VarType_Tuple::set_element_type(
int index, ::paddle_mobile::framework::proto::VarType_Type value) {
assert(::paddle_mobile::framework::proto::VarType_Type_IsValid(value));
element_type_.Set(index, value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.Tuple.element_type)
}
inline void VarType_Tuple::add_element_type(
::paddle_mobile::framework::proto::VarType_Type value) {
assert(::paddle_mobile::framework::proto::VarType_Type_IsValid(value));
element_type_.Add(value);
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.VarType.Tuple.element_type)
}
inline const ::google::protobuf::RepeatedField<int>&
VarType_Tuple::element_type() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.VarType.Tuple.element_type)
return element_type_;
}
inline ::google::protobuf::RepeatedField<int>*
VarType_Tuple::mutable_element_type() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.VarType.Tuple.element_type)
return &element_type_;
}
// -------------------------------------------------------------------
// VarType
// required .paddle_mobile.framework.proto.VarType.Type type = 1;
inline bool VarType::has_type() const {
return (_has_bits_[0] & 0x00000040u) != 0;
}
inline void VarType::set_has_type() { _has_bits_[0] |= 0x00000040u; }
inline void VarType::clear_has_type() { _has_bits_[0] &= ~0x00000040u; }
inline void VarType::clear_type() {
type_ = 0;
clear_has_type();
}
inline ::paddle_mobile::framework::proto::VarType_Type VarType::type() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.type)
return static_cast<::paddle_mobile::framework::proto::VarType_Type>(type_);
}
inline void VarType::set_type(
::paddle_mobile::framework::proto::VarType_Type value) {
assert(::paddle_mobile::framework::proto::VarType_Type_IsValid(value));
set_has_type();
type_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarType.type)
}
// optional .paddle_mobile.framework.proto.VarType.TensorDesc selected_rows = 2;
inline bool VarType::has_selected_rows() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarType::set_has_selected_rows() { _has_bits_[0] |= 0x00000001u; }
inline void VarType::clear_has_selected_rows() {
_has_bits_[0] &= ~0x00000001u;
}
inline void VarType::clear_selected_rows() {
if (selected_rows_ != NULL)
selected_rows_
->::paddle_mobile::framework::proto::VarType_TensorDesc::Clear();
clear_has_selected_rows();
}
inline const ::paddle_mobile::framework::proto::VarType_TensorDesc&
VarType::selected_rows() const {
const ::paddle_mobile::framework::proto::VarType_TensorDesc* p =
selected_rows_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.selected_rows)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_TensorDesc*>(
&::paddle_mobile::framework::proto::
_VarType_TensorDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType::mutable_selected_rows() {
set_has_selected_rows();
if (selected_rows_ == NULL) {
selected_rows_ = new ::paddle_mobile::framework::proto::VarType_TensorDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.selected_rows)
return selected_rows_;
}
inline ::paddle_mobile::framework::proto::VarType_TensorDesc*
VarType::release_selected_rows() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.selected_rows)
clear_has_selected_rows();
::paddle_mobile::framework::proto::VarType_TensorDesc* temp = selected_rows_;
selected_rows_ = NULL;
return temp;
}
inline void VarType::set_allocated_selected_rows(
::paddle_mobile::framework::proto::VarType_TensorDesc* selected_rows) {
delete selected_rows_;
selected_rows_ = selected_rows;
if (selected_rows) {
set_has_selected_rows();
} else {
clear_has_selected_rows();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.selected_rows)
}
// optional .paddle_mobile.framework.proto.VarType.LoDTensorDesc lod_tensor = 3;
inline bool VarType::has_lod_tensor() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void VarType::set_has_lod_tensor() { _has_bits_[0] |= 0x00000002u; }
inline void VarType::clear_has_lod_tensor() { _has_bits_[0] &= ~0x00000002u; }
inline void VarType::clear_lod_tensor() {
if (lod_tensor_ != NULL)
lod_tensor_
->::paddle_mobile::framework::proto::VarType_LoDTensorDesc::Clear();
clear_has_lod_tensor();
}
inline const ::paddle_mobile::framework::proto::VarType_LoDTensorDesc&
VarType::lod_tensor() const {
const ::paddle_mobile::framework::proto::VarType_LoDTensorDesc* p =
lod_tensor_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.lod_tensor)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_LoDTensorDesc*>(
&::paddle_mobile::framework::proto::
_VarType_LoDTensorDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
VarType::mutable_lod_tensor() {
set_has_lod_tensor();
if (lod_tensor_ == NULL) {
lod_tensor_ = new ::paddle_mobile::framework::proto::VarType_LoDTensorDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.lod_tensor)
return lod_tensor_;
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorDesc*
VarType::release_lod_tensor() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.lod_tensor)
clear_has_lod_tensor();
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* temp = lod_tensor_;
lod_tensor_ = NULL;
return temp;
}
inline void VarType::set_allocated_lod_tensor(
::paddle_mobile::framework::proto::VarType_LoDTensorDesc* lod_tensor) {
delete lod_tensor_;
lod_tensor_ = lod_tensor;
if (lod_tensor) {
set_has_lod_tensor();
} else {
clear_has_lod_tensor();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.lod_tensor)
}
// optional .paddle_mobile.framework.proto.VarType.LoDTensorArrayDesc
// tensor_array = 4;
inline bool VarType::has_tensor_array() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void VarType::set_has_tensor_array() { _has_bits_[0] |= 0x00000004u; }
inline void VarType::clear_has_tensor_array() { _has_bits_[0] &= ~0x00000004u; }
inline void VarType::clear_tensor_array() {
if (tensor_array_ != NULL)
tensor_array_->::paddle_mobile::framework::proto::
VarType_LoDTensorArrayDesc::Clear();
clear_has_tensor_array();
}
inline const ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc&
VarType::tensor_array() const {
const ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc* p =
tensor_array_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.tensor_array)
return p != NULL ? *p
: *reinterpret_cast<const ::paddle_mobile::framework::proto::
VarType_LoDTensorArrayDesc*>(
&::paddle_mobile::framework::proto::
_VarType_LoDTensorArrayDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
VarType::mutable_tensor_array() {
set_has_tensor_array();
if (tensor_array_ == NULL) {
tensor_array_ =
new ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.tensor_array)
return tensor_array_;
}
inline ::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
VarType::release_tensor_array() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.tensor_array)
clear_has_tensor_array();
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc* temp =
tensor_array_;
tensor_array_ = NULL;
return temp;
}
inline void VarType::set_allocated_tensor_array(
::paddle_mobile::framework::proto::VarType_LoDTensorArrayDesc*
tensor_array) {
delete tensor_array_;
tensor_array_ = tensor_array;
if (tensor_array) {
set_has_tensor_array();
} else {
clear_has_tensor_array();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.tensor_array)
}
// optional .paddle_mobile.framework.proto.VarType.ReaderDesc reader = 5;
inline bool VarType::has_reader() const {
return (_has_bits_[0] & 0x00000008u) != 0;
}
inline void VarType::set_has_reader() { _has_bits_[0] |= 0x00000008u; }
inline void VarType::clear_has_reader() { _has_bits_[0] &= ~0x00000008u; }
inline void VarType::clear_reader() {
if (reader_ != NULL)
reader_->::paddle_mobile::framework::proto::VarType_ReaderDesc::Clear();
clear_has_reader();
}
inline const ::paddle_mobile::framework::proto::VarType_ReaderDesc&
VarType::reader() const {
const ::paddle_mobile::framework::proto::VarType_ReaderDesc* p = reader_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.reader)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_ReaderDesc*>(
&::paddle_mobile::framework::proto::
_VarType_ReaderDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_ReaderDesc*
VarType::mutable_reader() {
set_has_reader();
if (reader_ == NULL) {
reader_ = new ::paddle_mobile::framework::proto::VarType_ReaderDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.reader)
return reader_;
}
inline ::paddle_mobile::framework::proto::VarType_ReaderDesc*
VarType::release_reader() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.reader)
clear_has_reader();
::paddle_mobile::framework::proto::VarType_ReaderDesc* temp = reader_;
reader_ = NULL;
return temp;
}
inline void VarType::set_allocated_reader(
::paddle_mobile::framework::proto::VarType_ReaderDesc* reader) {
delete reader_;
reader_ = reader;
if (reader) {
set_has_reader();
} else {
clear_has_reader();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.reader)
}
// optional .paddle_mobile.framework.proto.VarType.ChannelDesc channel = 6;
inline bool VarType::has_channel() const {
return (_has_bits_[0] & 0x00000010u) != 0;
}
inline void VarType::set_has_channel() { _has_bits_[0] |= 0x00000010u; }
inline void VarType::clear_has_channel() { _has_bits_[0] &= ~0x00000010u; }
inline void VarType::clear_channel() {
if (channel_ != NULL)
channel_->::paddle_mobile::framework::proto::VarType_ChannelDesc::Clear();
clear_has_channel();
}
inline const ::paddle_mobile::framework::proto::VarType_ChannelDesc&
VarType::channel() const {
const ::paddle_mobile::framework::proto::VarType_ChannelDesc* p = channel_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.channel)
return p != NULL ? *p : *reinterpret_cast<const ::paddle_mobile::framework::
proto::VarType_ChannelDesc*>(
&::paddle_mobile::framework::proto::
_VarType_ChannelDesc_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_ChannelDesc*
VarType::mutable_channel() {
set_has_channel();
if (channel_ == NULL) {
channel_ = new ::paddle_mobile::framework::proto::VarType_ChannelDesc;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.channel)
return channel_;
}
inline ::paddle_mobile::framework::proto::VarType_ChannelDesc*
VarType::release_channel() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.channel)
clear_has_channel();
::paddle_mobile::framework::proto::VarType_ChannelDesc* temp = channel_;
channel_ = NULL;
return temp;
}
inline void VarType::set_allocated_channel(
::paddle_mobile::framework::proto::VarType_ChannelDesc* channel) {
delete channel_;
channel_ = channel;
if (channel) {
set_has_channel();
} else {
clear_has_channel();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.channel)
}
// optional .paddle_mobile.framework.proto.VarType.Tuple tuple = 7;
inline bool VarType::has_tuple() const {
return (_has_bits_[0] & 0x00000020u) != 0;
}
inline void VarType::set_has_tuple() { _has_bits_[0] |= 0x00000020u; }
inline void VarType::clear_has_tuple() { _has_bits_[0] &= ~0x00000020u; }
inline void VarType::clear_tuple() {
if (tuple_ != NULL)
tuple_->::paddle_mobile::framework::proto::VarType_Tuple::Clear();
clear_has_tuple();
}
inline const ::paddle_mobile::framework::proto::VarType_Tuple& VarType::tuple()
const {
const ::paddle_mobile::framework::proto::VarType_Tuple* p = tuple_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarType.tuple)
return p != NULL
? *p
: *reinterpret_cast<
const ::paddle_mobile::framework::proto::VarType_Tuple*>(
&::paddle_mobile::framework::proto::
_VarType_Tuple_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType_Tuple*
VarType::mutable_tuple() {
set_has_tuple();
if (tuple_ == NULL) {
tuple_ = new ::paddle_mobile::framework::proto::VarType_Tuple;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarType.tuple)
return tuple_;
}
inline ::paddle_mobile::framework::proto::VarType_Tuple*
VarType::release_tuple() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarType.tuple)
clear_has_tuple();
::paddle_mobile::framework::proto::VarType_Tuple* temp = tuple_;
tuple_ = NULL;
return temp;
}
inline void VarType::set_allocated_tuple(
::paddle_mobile::framework::proto::VarType_Tuple* tuple) {
delete tuple_;
tuple_ = tuple;
if (tuple) {
set_has_tuple();
} else {
clear_has_tuple();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarType.tuple)
}
// -------------------------------------------------------------------
// VarDesc
// required string name = 1;
inline bool VarDesc::has_name() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void VarDesc::set_has_name() { _has_bits_[0] |= 0x00000001u; }
inline void VarDesc::clear_has_name() { _has_bits_[0] &= ~0x00000001u; }
inline void VarDesc::clear_name() {
name_.ClearToEmptyNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
clear_has_name();
}
inline const ::std::string& VarDesc::name() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarDesc.name)
return name_.GetNoArena();
}
inline void VarDesc::set_name(const ::std::string& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
value);
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarDesc.name)
}
#if LANG_CXX11
inline void VarDesc::set_name(::std::string&& value) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::move(value));
// @@protoc_insertion_point(field_set_rvalue:paddle_mobile.framework.proto.VarDesc.name)
}
#endif
inline void VarDesc::set_name(const char* value) {
GOOGLE_DCHECK(value != NULL);
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(value));
// @@protoc_insertion_point(field_set_char:paddle_mobile.framework.proto.VarDesc.name)
}
inline void VarDesc::set_name(const char* value, size_t size) {
set_has_name();
name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
::std::string(reinterpret_cast<const char*>(value), size));
// @@protoc_insertion_point(field_set_pointer:paddle_mobile.framework.proto.VarDesc.name)
}
inline ::std::string* VarDesc::mutable_name() {
set_has_name();
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarDesc.name)
return name_.MutableNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline ::std::string* VarDesc::release_name() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarDesc.name)
clear_has_name();
return name_.ReleaseNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited());
}
inline void VarDesc::set_allocated_name(::std::string* name) {
if (name != NULL) {
set_has_name();
} else {
clear_has_name();
}
name_.SetAllocatedNoArena(
&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarDesc.name)
}
// required .paddle_mobile.framework.proto.VarType type = 2;
inline bool VarDesc::has_type() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void VarDesc::set_has_type() { _has_bits_[0] |= 0x00000002u; }
inline void VarDesc::clear_has_type() { _has_bits_[0] &= ~0x00000002u; }
inline void VarDesc::clear_type() {
if (type_ != NULL) type_->::paddle_mobile::framework::proto::VarType::Clear();
clear_has_type();
}
inline const ::paddle_mobile::framework::proto::VarType& VarDesc::type() const {
const ::paddle_mobile::framework::proto::VarType* p = type_;
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarDesc.type)
return p != NULL ? *p
: *reinterpret_cast<
const ::paddle_mobile::framework::proto::VarType*>(
&::paddle_mobile::framework::proto::
_VarType_default_instance_);
}
inline ::paddle_mobile::framework::proto::VarType* VarDesc::mutable_type() {
set_has_type();
if (type_ == NULL) {
type_ = new ::paddle_mobile::framework::proto::VarType;
}
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.VarDesc.type)
return type_;
}
inline ::paddle_mobile::framework::proto::VarType* VarDesc::release_type() {
// @@protoc_insertion_point(field_release:paddle_mobile.framework.proto.VarDesc.type)
clear_has_type();
::paddle_mobile::framework::proto::VarType* temp = type_;
type_ = NULL;
return temp;
}
inline void VarDesc::set_allocated_type(
::paddle_mobile::framework::proto::VarType* type) {
delete type_;
type_ = type;
if (type) {
set_has_type();
} else {
clear_has_type();
}
// @@protoc_insertion_point(field_set_allocated:paddle_mobile.framework.proto.VarDesc.type)
}
// optional bool persistable = 3 [default = false];
inline bool VarDesc::has_persistable() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void VarDesc::set_has_persistable() { _has_bits_[0] |= 0x00000004u; }
inline void VarDesc::clear_has_persistable() { _has_bits_[0] &= ~0x00000004u; }
inline void VarDesc::clear_persistable() {
persistable_ = false;
clear_has_persistable();
}
inline bool VarDesc::persistable() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.VarDesc.persistable)
return persistable_;
}
inline void VarDesc::set_persistable(bool value) {
set_has_persistable();
persistable_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.VarDesc.persistable)
}
// -------------------------------------------------------------------
// BlockDesc
// required int32 idx = 1;
inline bool BlockDesc::has_idx() const {
return (_has_bits_[0] & 0x00000001u) != 0;
}
inline void BlockDesc::set_has_idx() { _has_bits_[0] |= 0x00000001u; }
inline void BlockDesc::clear_has_idx() { _has_bits_[0] &= ~0x00000001u; }
inline void BlockDesc::clear_idx() {
idx_ = 0;
clear_has_idx();
}
inline ::google::protobuf::int32 BlockDesc::idx() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.BlockDesc.idx)
return idx_;
}
inline void BlockDesc::set_idx(::google::protobuf::int32 value) {
set_has_idx();
idx_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.BlockDesc.idx)
}
// required int32 parent_idx = 2;
inline bool BlockDesc::has_parent_idx() const {
return (_has_bits_[0] & 0x00000002u) != 0;
}
inline void BlockDesc::set_has_parent_idx() { _has_bits_[0] |= 0x00000002u; }
inline void BlockDesc::clear_has_parent_idx() { _has_bits_[0] &= ~0x00000002u; }
inline void BlockDesc::clear_parent_idx() {
parent_idx_ = 0;
clear_has_parent_idx();
}
inline ::google::protobuf::int32 BlockDesc::parent_idx() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.BlockDesc.parent_idx)
return parent_idx_;
}
inline void BlockDesc::set_parent_idx(::google::protobuf::int32 value) {
set_has_parent_idx();
parent_idx_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.BlockDesc.parent_idx)
}
// repeated .paddle_mobile.framework.proto.VarDesc vars = 3;
inline int BlockDesc::vars_size() const { return vars_.size(); }
inline void BlockDesc::clear_vars() { vars_.Clear(); }
inline const ::paddle_mobile::framework::proto::VarDesc& BlockDesc::vars(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.BlockDesc.vars)
return vars_.Get(index);
}
inline ::paddle_mobile::framework::proto::VarDesc* BlockDesc::mutable_vars(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.BlockDesc.vars)
return vars_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::VarDesc* BlockDesc::add_vars() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.BlockDesc.vars)
return vars_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarDesc>*
BlockDesc::mutable_vars() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.BlockDesc.vars)
return &vars_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::VarDesc>&
BlockDesc::vars() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.BlockDesc.vars)
return vars_;
}
// repeated .paddle_mobile.framework.proto.OpDesc ops = 4;
inline int BlockDesc::ops_size() const { return ops_.size(); }
inline void BlockDesc::clear_ops() { ops_.Clear(); }
inline const ::paddle_mobile::framework::proto::OpDesc& BlockDesc::ops(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.BlockDesc.ops)
return ops_.Get(index);
}
inline ::paddle_mobile::framework::proto::OpDesc* BlockDesc::mutable_ops(
int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.BlockDesc.ops)
return ops_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::OpDesc* BlockDesc::add_ops() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.BlockDesc.ops)
return ops_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc>*
BlockDesc::mutable_ops() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.BlockDesc.ops)
return &ops_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::OpDesc>&
BlockDesc::ops() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.BlockDesc.ops)
return ops_;
}
// optional int32 forward_block_idx = 5 [default = -1];
inline bool BlockDesc::has_forward_block_idx() const {
return (_has_bits_[0] & 0x00000004u) != 0;
}
inline void BlockDesc::set_has_forward_block_idx() {
_has_bits_[0] |= 0x00000004u;
}
inline void BlockDesc::clear_has_forward_block_idx() {
_has_bits_[0] &= ~0x00000004u;
}
inline void BlockDesc::clear_forward_block_idx() {
forward_block_idx_ = -1;
clear_has_forward_block_idx();
}
inline ::google::protobuf::int32 BlockDesc::forward_block_idx() const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.BlockDesc.forward_block_idx)
return forward_block_idx_;
}
inline void BlockDesc::set_forward_block_idx(::google::protobuf::int32 value) {
set_has_forward_block_idx();
forward_block_idx_ = value;
// @@protoc_insertion_point(field_set:paddle_mobile.framework.proto.BlockDesc.forward_block_idx)
}
// -------------------------------------------------------------------
// ProgramDesc
// repeated .paddle_mobile.framework.proto.BlockDesc blocks = 1;
inline int ProgramDesc::blocks_size() const { return blocks_.size(); }
inline void ProgramDesc::clear_blocks() { blocks_.Clear(); }
inline const ::paddle_mobile::framework::proto::BlockDesc& ProgramDesc::blocks(
int index) const {
// @@protoc_insertion_point(field_get:paddle_mobile.framework.proto.ProgramDesc.blocks)
return blocks_.Get(index);
}
inline ::paddle_mobile::framework::proto::BlockDesc*
ProgramDesc::mutable_blocks(int index) {
// @@protoc_insertion_point(field_mutable:paddle_mobile.framework.proto.ProgramDesc.blocks)
return blocks_.Mutable(index);
}
inline ::paddle_mobile::framework::proto::BlockDesc* ProgramDesc::add_blocks() {
// @@protoc_insertion_point(field_add:paddle_mobile.framework.proto.ProgramDesc.blocks)
return blocks_.Add();
}
inline ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::BlockDesc>*
ProgramDesc::mutable_blocks() {
// @@protoc_insertion_point(field_mutable_list:paddle_mobile.framework.proto.ProgramDesc.blocks)
return &blocks_;
}
inline const ::google::protobuf::RepeatedPtrField<
::paddle_mobile::framework::proto::BlockDesc>&
ProgramDesc::blocks() const {
// @@protoc_insertion_point(field_list:paddle_mobile.framework.proto.ProgramDesc.blocks)
return blocks_;
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif // __GNUC__
#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
} // namespace proto
} // namespace framework
} // namespace paddle_mobile
namespace google {
namespace protobuf {
template <>
struct is_proto_enum<::paddle_mobile::framework::proto::VarType_Type>
: ::google::protobuf::internal::true_type {};
template <>
struct is_proto_enum<::paddle_mobile::framework::proto::AttrType>
: ::google::protobuf::internal::true_type {};
} // namespace protobuf
} // namespace google
// @@protoc_insertion_point(global_scope)
#endif // PROTOBUF_framework_2eproto__INCLUDED
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package paddle_mobile.framework.proto;
enum AttrType {
INT = 0;
FLOAT = 1;
STRING = 2;
INTS = 3;
FLOATS = 4;
STRINGS = 5;
BOOLEAN = 6;
BOOLEANS = 7;
BLOCK = 8;
LONG = 9;
}
// OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type.
message OpDesc {
message Attr {
required string name = 1;
required AttrType type = 2;
optional int32 i = 3;
optional float f = 4;
optional string s = 5;
repeated int32 ints = 6;
repeated float floats = 7;
repeated string strings = 8;
optional bool b = 10;
repeated bool bools = 11;
optional int32 block_idx = 12;
optional int64 l = 13;
};
message Var {
required string parameter = 1;
repeated string arguments = 2;
};
required string type = 3;
repeated Var inputs = 1;
repeated Var outputs = 2;
repeated Attr attrs = 4;
optional bool is_target = 5 [ default = false ];
};
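// Illustrative instance (hypothetical op and variable names, text format) of
// an OpDesc for a "mul" op:
//   type: "mul"
//   inputs { parameter: "X" arguments: "x0" }
//   inputs { parameter: "Y" arguments: "w0" }
//   outputs { parameter: "Out" arguments: "out0" }
//   attrs { name: "x_num_col_dims" type: INT i: 1 }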
// OpProto describes a C++ framework::OperatorBase derived class.
message OpProto {
// VarProto describes the C++ type framework::Variable.
message Var {
required string name = 1;
required string comment = 2;
optional bool duplicable = 3 [ default = false ];
optional bool intermediate = 4 [ default = false ];
optional bool dispensable = 5 [ default = false ];
}
// AttrProto describes the C++ type Attribute.
message Attr {
required string name = 1;
required AttrType type = 2;
required string comment = 3;
// If the attribute is generated, the Paddle third-party
// language binding is responsible for filling it;
// end users should not set it.
optional bool generated = 4 [ default = false ];
}
required string type = 1;
repeated Var inputs = 2;
repeated Var outputs = 3;
repeated Attr attrs = 4;
required string comment = 5;
}
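// Sketch (illustrative names and comments) of filling an OpProto through the
// C++ accessors that protoc generates from this message (see framework.pb.h):
//   OpProto proto;
//   proto.set_type("mul");
//   OpProto::Var* in = proto.add_inputs();
//   in->set_name("X");
//   in->set_comment("First input of the op");
//   OpProto::Attr* attr = proto.add_attrs();
//   attr->set_name("x_num_col_dims");
//   attr->set_type(paddle_mobile::framework::proto::INT);
//   attr->set_comment("Number of leading dims used to flatten X");
//   proto.set_comment("Matrix multiplication operator");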
message VarType {
enum Type {
// POD types
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
// Other types that may need additional descriptions
LOD_TENSOR = 7;
SELECTED_ROWS = 8;
FEED_MINIBATCH = 9;
FETCH_LIST = 10;
STEP_SCOPES = 11;
LOD_RANK_TABLE = 12;
LOD_TENSOR_ARRAY = 13;
PLACE_LIST = 14;
READER = 15;
CHANNEL = 16;
// Any runtime-decided variable type is RAW.
// RAW variables should manage their own allocations
// in operators like nccl_op.
RAW = 17;
TUPLE = 18;
}
required Type type = 1;
message TensorDesc {
// Should only be a POD type; this is enforced in C++.
required Type data_type = 1;
repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
}
optional TensorDesc selected_rows = 2;
message LoDTensorDesc {
required TensorDesc tensor = 1;
optional int32 lod_level = 2 [ default = 0 ];
}
optional LoDTensorDesc lod_tensor = 3;
message LoDTensorArrayDesc {
required TensorDesc tensor = 1;
optional int32 lod_level = 2 [ default = 0 ];
}
optional LoDTensorArrayDesc tensor_array = 4;
message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; }
optional ReaderDesc reader = 5;
message ChannelDesc {
required Type data_type = 1;
required int64 capacity = 2;
}
optional ChannelDesc channel = 6;
message Tuple { repeated Type element_type = 1; }
optional Tuple tuple = 7;
}
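// Sketch (illustrative): a float32 LoD tensor of shape [-1, 640, 480] with
// one LoD level, built via the generated C++ accessors:
//   VarType vt;
//   vt.set_type(VarType::LOD_TENSOR);
//   VarType::LoDTensorDesc* lt = vt.mutable_lod_tensor();
//   lt->set_lod_level(1);
//   VarType::TensorDesc* td = lt->mutable_tensor();
//   td->set_data_type(VarType::FP32);
//   td->add_dims(-1);
//   td->add_dims(640);
//   td->add_dims(480);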
message VarDesc {
required string name = 1;
required VarType type = 2;
optional bool persistable = 3 [ default = false ];
}
message BlockDesc {
required int32 idx = 1;
required int32 parent_idx = 2;
repeated VarDesc vars = 3;
repeated OpDesc ops = 4;
optional int32 forward_block_idx = 5 [ default = -1 ];
}
// Please refer to
// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md
// for more details.
// TODO(panyx0718): A model can have multiple programs. Need a
// way to distinguish them. Maybe ID or name?
message ProgramDesc { repeated BlockDesc blocks = 1; }
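// A minimal usage sketch (assuming protoc has generated the C++ classes for
// this schema), building an OpDesc message for a hypothetical "relu" op:
//
//   paddle_mobile::framework::proto::OpDesc op;
//   op.set_type("relu");
//   auto *in = op.add_inputs();
//   in->set_parameter("X");
//   in->add_arguments("input_0");
//   auto *out = op.add_outputs();
//   out->set_parameter("Out");
//   out->add_arguments("output_0");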
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "lod_tensor.h"
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <iterator>
namespace paddle_mobile {
namespace framework {
std::ostream &operator<<(std::ostream &os, const LoD &lod) {
os << "{";
for (auto &v : lod) {
os << "{";
bool is_first = true;
for (auto &i : v) {
if (is_first) {
os << i;
is_first = false;
} else {
os << ", " << i;
}
}
os << "}";
}
os << "}";
return os;
}
std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
// PADDLE_ENFORCE(t.type().hash_code() == typeid(float).hash_code());
// if (!platform::is_cpu_place(t.place())) {
// LoDTensor tt;
// framework::TensorCopy(t, platform::CPUPlace(), &tt);
// platform::DeviceContextPool &pool =
// platform::DeviceContextPool::Instance();
// auto &dev_ctx = *pool.Get(t.place());
// dev_ctx.Wait();
//
// os << tt;
// return os;
// }
os << "dim: " << t.dims() << "\n";
os << "lod: " << t.lod() << "\n";
// only print first ten elements
int64_t size = t.numel() < 10 ? t.numel() : 10;
for (int64_t i = 0; i < size; ++i) {
os << t.data<float>()[i] << " ";
}
return os;
}
std::string LoDToString(const LoD &lod) {
std::ostringstream stream;
stream << lod;
return stream.str();
}
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
size_t elem_end) {
// PADDLE_ENFORCE_LT(level, in.size());
// PADDLE_ENFORCE_LT(elem_end, in[level].size());
LoD res;
res.resize(in.size() - level);
// copy the first level
res[0].assign(in[level].begin() + elem_begin,
in[level].begin() + elem_end + 1);
for (size_t lvl = 1; lvl < res.size(); lvl++) {
const auto &in_level = in[level + lvl];
const auto &above_level = res[lvl - 1];
auto &out_level = res[lvl];
out_level.assign(in_level.begin() + above_level.front(),
in_level.begin() + above_level.back() + 1);
}
for (size_t lvl = 0; lvl < res.size(); lvl++) {
// to make the first offset equal 0, subtract the first element from all
// elements
size_t front = res[lvl].front();
for (auto &ele : res[lvl]) {
ele -= front;
}
}
return res;
}
LoD ToAbsOffset(const LoD &in) {
// the lowest level stores relative offsets
if (in.empty() || in.size() == 1) return in;
LoD result = in;
for (auto level = static_cast<int>(in.size() - 2); level >= 0; level--) {
for (size_t i = 0; i < in[level].size(); ++i) {
size_t index = in[level][i];
result[level][i] = result[level + 1][index];
}
}
return result;
}
bool operator==(const LoD &a, const LoD &b) {
if (a.size() != b.size()) {
return false;
}
for (size_t i = 0; i < a.size(); i++) {
const auto &a_level = a[i];
const auto &b_level = b[i];
if (a_level.size() != b_level.size()) {
return false;
}
for (size_t j = 0; j < a_level.size(); j++) {
if (a_level[j] != b_level[j]) {
return false;
}
}
}
return true;
}
bool CheckLoD(const LoD &in, int tensor_height) {
if (in.empty()) return true;
for (const auto &level : in) {
// check: there should be at least 2 offsets in each level.
if (level.size() < 2) return false;
// check: the first offset (the begin offset) of each level should be 0.
if (level.front() != 0) return false;
// check: all the offsets in a level should be strictly ascending (no
// duplicate items allowed).
if (std::adjacent_find(level.begin(), level.end(),
[](size_t a, size_t b) { return a >= b; }) != level.end()) {
std::cout << "ascending error";
return false;
}
}
// check: the lowest level's last offset should equal `tensor_height` if
// tensor_height > 0.
if (tensor_height > 0 && (size_t)tensor_height != in.back().back())
return false;
// check: a higher level's last offset should equal the lower level's
// size - 1.
// NOTE LoD stores the levels from top to bottom, so the higher level comes
// first.
for (size_t level = 0; level < in.size() - 1; level++) {
if (in[level].back() != in[level + 1].size() - 1) return false;
}
return true;
}
bool CheckAbsLoD(const LoD &in, int tensor_height) {
if (in.empty()) return true;
for (const auto &level : in) {
// check: all the offsets in a level should be strictly ascending (no
// duplicate items allowed).
if (std::adjacent_find(level.begin(), level.end(),
[](size_t a, size_t b) { return a >= b; }) != level.end()) {
return false;
}
// check: there should be at least 2 offsets in each level.
if (level.size() < 2) return false;
// check: the first offset of each level should be 0, and the last should be
// the same (the height of the underlying tensor).
if (level.front() != 0) return false;
if (tensor_height < 0) {
tensor_height = level.back();
} else if ((size_t)tensor_height != level.back()) {
return false;
}
}
return true;
}
using LoDAndOffset = std::pair<LoD, std::pair<size_t, size_t>>;
LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
size_t end_idx, size_t start_level) {
LoD sub_lod;
for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
// PADDLE_ENFORCE_LE(start_idx, end_idx);
// PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
std::vector<size_t> level_lens;
for (size_t i = start_idx; i < end_idx; ++i) {
level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
}
sub_lod.emplace_back(level_lens);
start_idx = lod[level_idx][start_idx];
end_idx = lod[level_idx][end_idx];
}
return LoDAndOffset{sub_lod, {start_idx, end_idx}};
}
void AppendLoD(LoD *lod, const LoD &lod_length) {
// PADDLE_ENFORCE(
// lod->empty() || lod->size() == lod_length.size(),
// "The lod_length should has the same size with the appended lod.");
if (lod->empty()) {
for (size_t i = 0; i < lod_length.size(); ++i) {
lod->emplace_back(1, 0); // size = 1, value = 0;
}
}
for (size_t i = 0; i < lod->size(); ++i) {
auto &level = (*lod)[i];
for (size_t len : lod_length[i]) {
level.push_back(level.back() + len);
}
}
}
void SerializeToStream(std::ostream &os, const LoDTensor &tensor) {
{ // the 1st field, uint32_t version for LoDTensor
constexpr uint32_t version = 0;
os.write(reinterpret_cast<const char *>(&version), sizeof(version));
}
{
// the 2nd field, LoD information
// uint64_t lod_level
// uint64_t lod_level_1 size in bytes.
// int* lod_level_1 data
// ...
auto lod = tensor.lod();
uint64_t size = lod.size();
os.write(reinterpret_cast<const char *>(&size), sizeof(size));
for (auto &each : lod) {
size = each.size() * sizeof(framework::LoD::value_type::value_type);
os.write(reinterpret_cast<const char *>(&size), sizeof(size));
os.write(reinterpret_cast<const char *>(each.data()),
static_cast<std::streamsize>(size));
}
}
// the 3rd field, Tensor
TensorToStream(os, static_cast<Tensor>(tensor));
}
void DeserializeFromStream(std::istream &is, LoDTensor *tensor) {
{
// the 1st field, uint32_t version for LoDTensor
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
}
{
// the 2nd field, LoD information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
auto &lod = *tensor->mutable_lod();
lod.resize(lod_level);
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
lod[i] = tmp;
}
}
// the 3rd field, Tensor
TensorFromStream(is, static_cast<Tensor *>(tensor));
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensor.h"
#include "tensor_util.h"
namespace paddle_mobile {
namespace framework {
/*
* LoD is short for Level of Details.
*
* - in a level, each element indicates the offset of a sequence in the lower
* level
* - the first element should be 0, indicating that the first sequence starts
* at 0
* - each sequence's begin and end (non-inclusive) are level[id] and
* level[id + 1]
*
* For example, a 3-level LoD stores
*
* 0 2 3
* 0 2 4 7
* 0 2 5 7 10 12 15 20
*/
using LoD = std::vector<std::vector<size_t>>;
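// For instance, the 3-level LoD in the comment above can be written directly
// as a braced initializer (a sketch):
//   LoD lod = {{0, 2, 3}, {0, 2, 4, 7}, {0, 2, 5, 7, 10, 12, 15, 20}};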
std::ostream &operator<<(std::ostream &os, const LoD &lod);
std::ostream &operator<<(std::ostream &os, const LoDTensor &t);
std::string LoDToString(const LoD &lod);
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
size_t elem_end);
/*
* Transform an LoD from relative offsets to absolute offsets.
*/
LoD ToAbsOffset(const LoD &in);
bool operator==(const LoD &a, const LoD &b);
/*
* Check whether this lod's format is valid.
*
* ATTENTION:
* - Empty lod is treated as valid.
*
* It will check the following:
*
* 1. all the offsets in a level should be strictly ascending (no duplicate
* items allowed).
* 2. there should be at least 2 offsets in each level.
* 3. a higher level's last offset should equal the lower level's size - 1.
* 4. the first offset (the begin offset) of each level should be 0.
* 5. the lowest level's last offset should equal `tensor_height` if
* tensor_height > 0.
*/
bool CheckLoD(const LoD &in, int tensor_height = -1);
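// Example (sketch): for a tensor of height 10,
//   CheckLoD({{0, 2, 5, 10}}, 10); // true
//   CheckLoD({{1, 2, 5, 10}}, 10); // false: the first offset is not 0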
/*
* Check whether this absolute lod's format is valid.
*
* ATTENTION:
* - Empty lod is treated as valid.
*
* It will check the following:
* 1. all the offsets in a level should be strictly ascending (no duplicate
* items allowed).
* 2. there should be at least 2 offsets in each level.
* 3. the first offset of each level should be 0, and the last should be the
* same (the height of the underlying tensor), or `tensor_height` if
* tensor_height > 0.
*/
bool CheckAbsLoD(const LoD &in, int tensor_height = -1);
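// Example (sketch): in an absolute LoD every level's last offset matches the
// tensor height:
//   CheckAbsLoD({{0, 3, 5}, {0, 1, 2, 3, 4, 5}}, 5); // true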
/*
* LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
class LoDTensor : public Tensor {
public:
LoDTensor() : Tensor() {}
explicit LoDTensor(const LoD &lod) : lod_(lod) {}
void set_lod(const LoD &lod) { lod_ = lod; }
const LoD &lod() const { return lod_; }
LoD *mutable_lod() { return &lod_; }
/*
* Get the start offset and end offset of an element from LoD.
*/
std::pair<size_t, size_t> lod_element(size_t level, size_t elem) const {
// PADDLE_ENFORCE_LT(level, NumLevels());
// PADDLE_ENFORCE_LT(elem, NumElements(level));
return std::make_pair((lod_)[level][elem], (lod_)[level][elem + 1]);
}
/*
* Number of LoDTensor's levels. Each level holds units of data; for example,
* in a text view, article, paragraph, and sentence form 3 levels.
*/
size_t NumLevels() const { return lod_.size(); }
/*
* Number of elements in a level.
*/
size_t NumElements(size_t level = 0) const {
// PADDLE_ENFORCE_LT(level, NumLevels());
// the last offset is the end of last element
return (lod_)[level].size() - 1;
}
private:
LoD lod_;
};
/*
* Expand the `source` to fit the LoD of `lod`. For example, a `source`
* LoDTensor is
* - LoD: [0, 2]
* - tensor: [a0, a1]
* a `lod` is
* - LoD: [0 3 5]
* returns a new LoDTensor
* - [a0 a0 a0 a1 a1]
*/
template <typename T>
LoDTensor LodExpand(const LoDTensor &source, const LoD &lod, size_t level) {
LoD abs_lod = ToAbsOffset(lod);
const auto &lod_level = lod[level];
size_t num_instances = source.dims()[0];
// new tensor
LoDTensor tensor;
tensor.set_lod(lod);
auto dims = source.dims();
dims[0] = lod_level.back();
tensor.Resize(dims);
tensor.mutable_data<T>();
// PADDLE_ENFORCE_EQ(num_instances, lod_level.size() - 1);
for (size_t ins = 0; ins < num_instances; ins++) {
for (size_t elem = lod_level[ins]; elem < lod_level[ins + 1]; elem++) {
auto slice = tensor.Slice(elem, elem + 1);
TensorCopy(source.Slice(ins, ins + 1), &slice);
}
}
return tensor;
}
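// Usage sketch for the example in the comment above LodExpand:
//   LoDTensor source; // LoD {{0, 2}}, two rows a0, a1
//   LoD lod = {{0, 3, 5}};
//   LoDTensor expanded = LodExpand<float>(source, lod, 0);
//   // expanded rows: a0 a0 a0 a1 a1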
// Get the absolute offset of lod[start_level][start_idx:end_idx] and the
// relative lengths of details for every level (i.e., [start_level:]).
//
// For example,
// lod = [[0, 3, 4, 8], [0, 9, 10, 11, 13, 17, 19, 22, 24]]
// start_level = 0
// start_idx = 1
// end_idx = 3
//
// Returns:
// LoD = [[1, 4], [2, 4, 2, 3, 2]]
// pair<size_t, size_t> = {11, 24}
std::pair<LoD, std::pair<size_t, size_t>> GetSubLoDAndAbsoluteOffset(
const LoD &lod, size_t start_idx, size_t end_idx, size_t start_level);
void AppendLoD(LoD *lod, const LoD &lod_length);
/*
* Serialize/Deserialize a LoDTensor to/from std::ostream.
* You can pass an ofstream or ostringstream to serialize to a file
* or to an in-memory string. A GPU tensor will be copied to the CPU.
*/
void SerializeToStream(std::ostream &os, const LoDTensor &tensor);
void DeserializeFromStream(std::istream &is, LoDTensor *tensor);
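// Round-trip sketch using string streams (any std::ostream/std::istream
// works):
//   std::ostringstream os;
//   SerializeToStream(os, tensor);
//   std::istringstream is(os.str());
//   LoDTensor restored;
//   DeserializeFromStream(is, &restored);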
} // namespace framework
} // namespace paddle_mobile
//
// Created by liuRuiLong on 2018/5/4.
//
#include "op_desc.h"
namespace paddle_mobile {
namespace framework {
OpDesc::OpDesc(const proto::OpDesc &desc) : desc_(desc) {
for (int i = 0; i < desc_.inputs_size(); ++i) {
const proto::OpDesc::Var &var = desc_.inputs(i);
std::vector<std::string> &args = inputs_[var.parameter()];
int arg_size = var.arguments_size();
for (int j = 0; j < arg_size; ++j) {
args.push_back(var.arguments(j));
}
}
for (int i = 0; i < desc_.outputs_size(); ++i) {
const proto::OpDesc::Var &var = desc_.outputs(i);
std::vector<std::string> &args = outputs_[var.parameter()];
int arg_size = var.arguments_size();
for (int j = 0; j < arg_size; ++j) {
args.push_back(var.arguments(j));
}
}
for (const proto::OpDesc::Attr &attr : desc_.attrs()) {
std::string attr_name = attr.name();
if (attr.type() != proto::AttrType::BLOCK) {
attrs_[attr_name] = Attribute::GetAttrValue(attr);
// if (attr.type() == proto::AttrType::INT){
// std::cout << " attrName " << attr_name << " " <<
// attrs_[attr_name].Get<int>() << std::endl;
// }
}
}
}
const std::vector<std::string> &OpDesc::Input(const std::string &name) const {
return inputs_.find(name)->second;
}
const std::vector<std::string> &OpDesc::Output(const std::string &name) const {
return outputs_.find(name)->second;
}
Attribute OpDesc::GetAttr(const std::string &name) const {
auto it = attrs_.find(name);
return it->second;
}
const std::unordered_map<std::string, Attribute> &OpDesc::GetAttrMap() const {
return attrs_;
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/type_define.h"
#include "framework.pb.h"
#include "paddle_mobile_object.h"
namespace paddle_mobile {
namespace framework {
class OpDesc : PaddleMobileObject {
public:
OpDesc(const proto::OpDesc &desc);
const std::vector<std::string> &Input(const std::string &name) const;
const std::vector<std::string> &Output(const std::string &name) const;
Attribute GetAttr(const std::string &name) const;
const VariableNameMap &GetInputs() { return inputs_; }
const VariableNameMap &GetOutputs() { return outputs_; }
const AttributeMap &GetAttrMap() const;
const std::string &Type() { return desc_.type(); }
private:
proto::OpDesc desc_;
VariableNameMap inputs_;
VariableNameMap outputs_;
AttributeMap attrs_;
};
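// Usage sketch (assuming `proto_op` is a parsed proto::OpDesc; the attribute
// name "axis" is illustrative):
//   OpDesc op(proto_op);
//   const std::vector<std::string> &xs = op.Input("X"); // argument names
//   int axis = op.GetAttr("axis").Get<int>();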
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/type_define.h"
#include "framework.pb.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
struct OpInfo {
OpCreator<Dtype> creator_;
const OpCreator<Dtype>& Creator() const {
// PADDLE_ENFORCE_NOT_NULL(creator_,
// "Operator Creator has not been registered");
return creator_;
}
};
template <typename Dtype>
class OpInfoMap {
public:
static OpInfoMap& Instance() {
// A function-local static keeps this valid C++11 (a variable template
// would require C++14) and is lazily initialized on first use.
static OpInfoMap* g_op_info_map = new OpInfoMap();
return *g_op_info_map;
}
bool Has(const std::string& op_type) const {
return map_.find(op_type) != map_.end();
}
void Insert(const std::string& type, const OpInfo<Dtype>& info) {
// PADDLE_ENFORCE(!Has(type), "Operator %s has been registered", type);
map_.insert({type, info});
}
const OpInfo<Dtype>& Get(const std::string& type) const {
auto op_info_ptr = GetNullable(type);
// PADDLE_ENFORCE_NOT_NULL(op_info_ptr,
// "Operator %s has not been registered", type);
return *op_info_ptr;
}
const OpInfo<Dtype>* GetNullable(const std::string& type) const {
auto it = map_.find(type);
if (it == map_.end()) {
return nullptr;
} else {
return &it->second;
}
}
const std::unordered_map<std::string, OpInfo<Dtype>>& map() const {
return map_;
}
std::unordered_map<std::string, OpInfo<Dtype>>* mutable_map() {
return &map_;
}
private:
OpInfoMap() = default;
std::unordered_map<std::string, OpInfo<Dtype>> map_;
// DISABLE_COPY_AND_ASSIGN(OpInfoMap);
};
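// Usage sketch: registering and looking up an operator creator (the creator
// value and the op name "conv2d" are illustrative):
//   OpInfo<CPU> info;
//   info.creator_ = /* function that creates the operator */;
//   OpInfoMap<CPU>::Instance().Insert("conv2d", info);
//   if (OpInfoMap<CPU>::Instance().Has("conv2d")) {
//     const OpInfo<CPU> &found = OpInfoMap<CPU>::Instance().Get("conv2d");
//   }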
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "data_layout.h"
#include "framework.pb.h"
namespace paddle_mobile {
namespace framework {
struct OpKernelType {
struct Hash {
size_t operator()(const OpKernelType& key) const {
int data_type = static_cast<int>(key.data_type_) << LEFT_SHIFT;
int data_layout = static_cast<int>(key.data_layout_) << (LEFT_SHIFT * 2);
std::hash<int> hasher;
return hasher(data_type + data_layout);
}
};
// place, data_type, and library_type each have fewer than 2^8 kinds
constexpr static int LEFT_SHIFT = 8;
proto::VarType::Type data_type_;
DataLayout data_layout_;
OpKernelType(proto::VarType::Type data_type,
DataLayout data_layout = DataLayout::kAnyLayout)
: data_type_(data_type), data_layout_(data_layout) {}
bool operator==(const OpKernelType& o) const {
return data_type_ == o.data_type_ && data_layout_ == o.data_layout_;
}
bool operator!=(const OpKernelType& o) const { return !(*this == o); }
};
inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
return l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r;
}
inline bool TransFromNeeded(const OpKernelType& l, const OpKernelType& r) {
return (l.data_type_ != r.data_type_) ||
NeedTransformLayout(l.data_layout_, r.data_layout_);
}
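// Sketch: a kernel transform is needed when either the data type or the
// layout differs; kAnyLayout matches any layout (layout names assumed from
// data_layout.h):
//   OpKernelType a(proto::VarType::FP32, DataLayout::kNHWC);
//   OpKernelType b(proto::VarType::FP32, DataLayout::kNCHW);
//   TransFromNeeded(a, b); // true: layouts differ
//   OpKernelType c(proto::VarType::FP32, DataLayout::kAnyLayout);
//   TransFromNeeded(a, c); // false: kAnyLayout matches NHWC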
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
namespace paddle_mobile {
namespace framework {
// This class not only builds the proto but also initializes attribute
// checkers.
class OpProtoAndCheckerMaker {};
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "operator.h"
#include "op_info.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
OperatorBase<Dtype>::OperatorBase(const std::string& type,
const VariableNameMap& inputs,
const VariableNameMap& outputs,
const AttributeMap& attrs,
std::shared_ptr<Scope> scope)
: type_(type),
inputs_(inputs),
outputs_(outputs),
attrs_(attrs),
scope_(scope) {
CheckAllInputOutputSet();
}
template <typename Dtype>
void OperatorBase<Dtype>::Run() {
RunImpl();
}
template <typename Dtype>
void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
template class OperatorBase<CPU>;
template class OperatorWithKernel<CPU>;
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <map>
#include "attribute.h"
#include "block_desc.h"
#include "common/type_define.h"
#include "common/types.h"
#include "common/variant.h"
#include "op_info.h"
#include "op_kernel_type.h"
#include "paddle_mobile_object.h"
#include "scope.h"
#include "tensor.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype>
class OperatorBase : PaddleMobileObject {
public:
OperatorBase(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap& attrs,
std::shared_ptr<Scope> scope);
virtual ~OperatorBase() {}
virtual void Run();
const VariableNameMap& Inputs() const { return inputs_; }
const VariableNameMap& Outputs() const { return outputs_; }
const std::string& Type() const { return type_; }
const AttributeMap& Attrs() const { return attrs_; }
protected:
std::shared_ptr<Scope> scope_;
std::string type_;
VariableNameMap inputs_;
VariableNameMap outputs_;
AttributeMap attrs_;
private:
void CheckAllInputOutputSet() const;
virtual void RunImpl() const = 0;
};
template <typename Dtype>
class OperatorWithKernel : public OperatorBase<Dtype> {
public:
OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap& attrs,
std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope) {}
virtual void InferShape() const = 0;
protected:
virtual void RunImpl() const = 0;
private:
};
template <typename Dtype, typename P>
class OpKernelBase : PaddleMobileObject {
public:
virtual void Compute(const P& para) const = 0;
virtual ~OpKernelBase() = default;
};
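// Sketch of a concrete operator built on these bases (all names are
// illustrative, not part of the framework):
//   template <typename Dtype>
//   class ReluOp : public OperatorWithKernel<Dtype> {
//    public:
//     using OperatorWithKernel<Dtype>::OperatorWithKernel;
//     void InferShape() const override {
//       // resize the output tensor from the input dims
//     }
//    protected:
//     void RunImpl() const override {
//       // fetch variables from this->scope_ and invoke the kernel
//     }
//   };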
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "paddle_mobile_object.h"
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <string>
#include "stdio.h"
namespace paddle_mobile {
class PaddleMobileObject {
public:
// Return the object's address formatted as a string (by value; a reference
// to the local buffer would dangle).
virtual std::string ToString() {
char address[128] = {0};
std::snprintf(address, sizeof(address), "%p", static_cast<void *>(this));
return std::string(address);
}
private:
};
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
namespace paddle_mobile {
namespace framework {}
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/types.h"
#include "paddle_mobile_object.h"
#include "program_desc.h"
#include "scope.h"
namespace paddle_mobile {
namespace framework {
template <typename Dtype, Precision P = Precision::FP32>
class Program : PaddleMobileObject {
public:
std::shared_ptr<ProgramDesc> originProgram;
std::shared_ptr<ProgramDesc> optimizeProgram;
std::shared_ptr<Scope> scope;
private:
};
} // namespace framework
} // namespace paddle_mobile
//
// Created by liuRuiLong on 2018/5/4.
//
#include "program_desc.h"
namespace paddle_mobile {
namespace framework {
ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) : desc_(desc) {
for (auto &block_desc : *desc_.mutable_blocks()) {
// new framework::BlockDesc(block_desc)
blocks_.emplace_back(std::make_shared<BlockDesc>(block_desc));
}
}
std::shared_ptr<BlockDesc> ProgramDesc::Block(size_t idx) {
return blocks_[idx];
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <vector>
#include "block_desc.h"
#include "common/types.h"
#include "paddle_mobile_object.h"
namespace paddle_mobile {
namespace framework {
class ProgramDesc : PaddleMobileObject {
public:
ProgramDesc(const proto::ProgramDesc &desc);
std::shared_ptr<BlockDesc> Block(size_t idx);
const std::vector<std::shared_ptr<BlockDesc>> &Blocks() { return blocks_; }
private:
std::vector<std::shared_ptr<BlockDesc>> blocks_;
proto::ProgramDesc desc_;
};
} // namespace framework
} // namespace paddle_mobile
#include "scope.h"
#include <set>
#include <vector>
namespace paddle_mobile {
namespace framework {
Scope &Scope::NewScope() const {
std::unique_lock<std::mutex> lock(mutex_);
kids_.push_back(new Scope(this));
return *kids_.back();
}
Variable *Scope::Var(const std::string &name) {
auto *pvar = FindVarLocally(name);
if (pvar != nullptr) {
return pvar;
}
pvar = new Variable;
vars_[name] = pvar;
pvar->name_ = &(vars_.find(name)->first);
return pvar;
}
// Variable* Scope::Var(std::string* name) {
// auto var_name = string::Sprintf("%p.%d", this, vars_.size());
// if (name != nullptr) {
// *name = var_name;
// }
// return Var(var_name);
// }
Variable *Scope::FindVar(const std::string &name) const {
auto *pvar = FindVarLocally(name);
if (pvar != nullptr) {
return pvar;
}
return (parent_ == nullptr) ? nullptr : parent_->FindVar(name);
}
const Scope *Scope::FindScope(const Variable *var) const {
for (auto &name_var : vars_) {
if (name_var.second == var) {
return this;
}
}
return (parent_ == nullptr) ? nullptr : parent_->FindScope(var);
}
void Scope::DropKids() {
for (Scope *s : kids_) {
delete s;
}
kids_.clear();
}
std::vector<std::string> Scope::LocalVarNames() const {
std::vector<std::string> known_vars;
known_vars.reserve(vars_.size());
for (auto &name_var : vars_) {
known_vars.emplace_back(name_var.first);
}
return known_vars;
}
void Scope::DeleteScope(Scope *scope) const {
std::unique_lock<std::mutex> lock(mutex_);
auto it = std::find(kids_.begin(), kids_.end(), scope);
kids_.erase(it);
delete scope;
}
void Scope::EraseVars(const std::vector<std::string> &var_names) {
std::set<std::string> var_set(var_names.begin(), var_names.end());
for (auto it = vars_.begin(); it != vars_.end();) {
if (var_set.find(it->first) != var_set.end()) {
delete it->second;
it = vars_.erase(it);
} else {
++it;
}
}
}
void Scope::Rename(const std::string &origin_name,
const std::string &new_name) const {
auto origin_it = vars_.find(origin_name);
if (origin_it == vars_.end()) {
return;
}
auto new_it = vars_.find(new_name);
if (new_it != vars_.end()) {
return;
}
vars_[new_name] = origin_it->second;
vars_.erase(origin_it);
}
//
// std::string Scope::Rename(const std::string& origin_name) const {
// auto var_name = string::Sprintf("%p.%d", this, vars_.size());
// Rename(origin_name, var_name);
// return var_name;
// }
Variable *Scope::FindVarLocally(const std::string &name) const {
auto it = vars_.find(name);
if (it != vars_.end()) {
return it->second;
}
return nullptr;
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <list> //std::list
#include <mutex> //std::mutex
#include <unordered_map> //std::unordered_map
#include "variable.h"
namespace paddle_mobile {
namespace framework {
class Scope {
public:
Scope() {}
~Scope() {}
Scope& NewScope() const;
/// Create a variable with given name if it doesn't exist.
Variable* Var(const std::string& name);
/// Create a variable with a scope-unique name.
Variable* Var(std::string* name = nullptr);
void EraseVars(const std::vector<std::string>& var_names);
/// Find a variable in the scope or any of its ancestors. Returns
/// nullptr if cannot find.
Variable* FindVar(const std::string& name) const;
const Scope* parent() const { return parent_; }
/// Find the scope or an ancestor scope that contains the given variable.
const Scope* FindScope(const Variable* var) const;
void DeleteScope(Scope* scope) const;
/// Drop all child scopes belonging to this scope.
void DropKids();
// Enumerate all the variables the current scope contains.
std::vector<std::string> LocalVarNames() const;
// Rename variable to a new name
void Rename(const std::string& origin_name,
const std::string& new_name) const;
// Rename variable to a new name and return the new name
std::string Rename(const std::string& origin_name) const;
Variable* FindVarLocally(const std::string& name) const;
private:
// Call Scope::NewScope for a sub-scope.
explicit Scope(Scope const* parent) : parent_(parent) {}
mutable std::unordered_map<std::string, Variable*> vars_;
mutable std::list<Scope*> kids_;
Scope const* parent_{nullptr};
mutable std::mutex mutex_;
};
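// Usage sketch:
//   Scope scope;
//   Variable *x = scope.Var("x");  // created in this scope
//   Scope &child = scope.NewScope();
//   child.FindVar("x");            // found via the parent chain
//   child.FindVarLocally("x");     // nullptr: not local to `child`
//   scope.DropKids();              // deletes `child`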
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <vector>
#include "lod_tensor.h"
#include "tensor.h"
namespace paddle_mobile {
namespace framework {
class SelectedRows {
public:
SelectedRows(const std::vector<int64_t>& rows, const int64_t& height)
: rows_(rows), height_(height) {
value_.reset(new Tensor());
}
SelectedRows() {
height_ = 0;
value_.reset(new Tensor());
}
const Tensor& value() const { return *value_; }
Tensor* mutable_value() { return value_.get(); }
int64_t height() const { return height_; }
void set_height(int64_t height) { height_ = height; }
const std::vector<int64_t>& rows() const { return rows_; }
std::vector<int64_t>* mutable_rows() { return &rows_; }
void set_rows(const std::vector<int64_t>& rows) { rows_ = rows; }
/**
* get the index of id in rows
*/
int64_t index(int64_t id) const {
auto it = std::find(rows_.begin(), rows_.end(), id);
// PADDLE_ENFORCE(it != rows_.end(), "id should be in rows");
return static_cast<int64_t>(std::distance(rows_.begin(), it));
}
DDim GetCompleteDims() const {
std::vector<int64_t> dims = vectorize(value_->dims());
dims[0] = height_;
return make_ddim(dims);
}
private:
// Notice: rows can contain duplicates, e.g. {0, 4, 7, 0, 5, 7, 9}.
// SelectedRows are simply concatenated when added together; the duplicate
// rows are only resolved when a SelectedRows is added to a Tensor.
std::vector<int64_t> rows_;
std::unique_ptr<Tensor> value_{nullptr};
int64_t height_;
};
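// Sketch: rows {0, 4, 7} of a sparse matrix with height 10; `value_` then
// holds the 3 selected rows (the width 64 is illustrative):
//   SelectedRows sr({0, 4, 7}, 10);
//   sr.mutable_value()->Resize(make_ddim({3, 64}));
//   sr.index(4);          // 1: position of row id 4 in rows()
//   sr.GetCompleteDims(); // {10, 64}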
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdint>
#include <cstring>
#include <memory>
#include <typeindex>
#include <vector>
#include "data_layout.h"
#include "ddim.h"
#include "memory/t_malloc.h"
namespace paddle_mobile {
namespace framework {
template <typename... T>
struct SizeOfTypeFunctor;
template <typename T>
struct SizeOfTypeFunctor<T> {
size_t operator()(std::type_index type) const {
if (typeid(T).hash_code() == type.hash_code()) {
return sizeof(T);
} else {
return 0UL;
}
}
};
template <>
struct SizeOfTypeFunctor<> {
size_t operator()(std::type_index type) const { return 0UL; }
};
template <typename HEAD, typename... TAIL>
struct SizeOfTypeFunctor<HEAD, TAIL...> {
size_t operator()(std::type_index type) const {
SizeOfTypeFunctor<HEAD> head;
size_t head_size = head(type);
if (head_size != 0) {
return head_size;
}
SizeOfTypeFunctor<TAIL...> tail;
return tail(type);
}
};
static inline size_t SizeOfType(std::type_index type) {
SizeOfTypeFunctor<int, float, double, int16_t, int64_t, bool, size_t> functor;
size_t size = functor(type);
// PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name());
return size;
}
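// e.g. SizeOfType(typeid(float)) == sizeof(float); a type outside the list
// above yields 0 here because the size enforcement is commented out.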
class LoDTensor;
class Tensor {
public:
Tensor() : offset_(0) {}
/*! Return a pointer to mutable memory block. */
template <typename T>
inline T *data() {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() == typeid(T).hash_code(),
// "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name());
return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
offset_);
}
/*! Return a pointer to constant memory block. */
template <typename T>
inline const T *data() const {
check_memory_size();
// PADDLE_ENFORCE(std::is_same<T, void>::value ||
// holder_->type().hash_code() == typeid(T).hash_code(),
// "Tensor holds the wrong type, it holds %s",
// this->holder_->type().name());
return reinterpret_cast<const T *>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}
inline bool IsInitialized() const { return holder_ != nullptr; }
/**
* @brief Return a pointer to the mutable memory block.
* @note If the memory does not exist, it is allocated.
*/
template <typename T>
inline T *mutable_data() {
static_assert(std::is_pod<T>::value, "T must be POD");
return reinterpret_cast<T *>(mutable_data(typeid(T)));
}
inline void *mutable_data(std::type_index type) {
if (holder_ != nullptr) {
holder_->set_type(type);
}
// PADDLE_ENFORCE_GE(numel(), 0,
// "When calling this method, the Tensor's numel must be "
// "equal or larger than zero. "
// "Please check Tensor::Resize has been called first.");
int64_t size = numel() * SizeOfType(type);
if (holder_ == nullptr || holder_->size() < size + offset_) {
holder_.reset(new PlaceholderImpl(size, type));
offset_ = 0;
}
return reinterpret_cast<void *>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
}
inline void *mutable_data() {
// PADDLE_ENFORCE(this->holder_ != nullptr,
// "Cannot invoke mutable data if current hold nothing.");
return mutable_data(holder_->type());
}
/**
* @brief Return a pointer to the mutable memory block.
*
* @param[in] dims The dimensions of the memory block.
*
* @note If the memory does not exist, it is allocated.
*/
template <typename T>
inline T *mutable_data(DDim dims) {
static_assert(std::is_pod<T>::value, "T must be POD");
Resize(dims);
return mutable_data<T>();
}
/*! Return the dimensions of the memory block. */
inline const DDim &dims() const { return dims_; }
/*! Return the numel of the memory block. */
inline int64_t numel() const { return product(dims_); }
/*! Resize the dimensions of the memory block. */
inline Tensor &Resize(const DDim &dims) {
dims_ = dims;
return *this;
}
/*! The internal of two tensors share the same memory block. */
inline Tensor &ShareDataWith(const Tensor &src) {
src.check_memory_size();
*this = src;
return *this;
}
/**
* @brief Return a sub-tensor of the given tensor.
*
* @param[in] begin_idx The index of the start row(inclusive) to slice.
* The index number begins from 0.
* @param[in] end_idx The index of the end row(exclusive) to slice.
* The index number begins from 0.
*/
inline Tensor Slice(int begin_idx, int end_idx) const {
check_memory_size();
// PADDLE_ENFORCE_GE(begin_idx, 0,
// "The start row index must be greater than 0.");
// PADDLE_ENFORCE_LE(end_idx, dims_[0],
// "The end row index is out of bound.");
// PADDLE_ENFORCE_LT(begin_idx, end_idx,
// "The start row index must be less than the end row index.");
if (dims_[0] == 1) {
return *this;
} else {
size_t base = numel() / dims_[0];
Tensor dst;
dst.holder_ = holder_;
dst.set_layout(layout_);
DDim dst_dims = dims_;
dst_dims[0] = end_idx - begin_idx;
dst.Resize(dst_dims);
dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
return dst;
}
}
std::type_index type() const {
// PADDLE_ENFORCE_NOT_NULL(
// holder_, "Tensor not initialized yet when
// Tensor::type() is called.");
return holder_->type();
}
// memory_size returns the held memory size in bytes.
size_t memory_size() const {
return holder_ == nullptr ? 0UL : holder_->size() - offset_;
}
inline void check_memory_size() const {
// PADDLE_ENFORCE_NOT_NULL(
// holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
// PADDLE_ENFORCE_LE(
// numel() * SizeOfType(type()), memory_size(),
// "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
// "first to re-allocate memory.\n"
// "or maybe the required data-type mismatches the data already stored.");
}
inline DataLayout layout() const { return layout_; }
inline void set_layout(const DataLayout layout) { layout_ = layout; }
private:
/**
* @note Placeholder hides type T, so it doesn't appear as a template
* parameter of Variable.
*/
struct Placeholder {
virtual ~Placeholder() = default;
virtual void *ptr() const = 0;
virtual size_t size() const = 0;
virtual std::type_index type() const = 0;
virtual void set_type(std::type_index type) = 0;
};
struct PlaceholderImpl : public Placeholder {
PlaceholderImpl(size_t size, std::type_index type)
: ptr_(static_cast<uint8_t *>(memory::Alloc(size)),
memory::PODDeleter<uint8_t>()),
size_(size),
type_(type) {
// PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.",
// (is_cpu_place(place_) ? "CPU" : "GPU"));
}
virtual size_t size() const { return size_; }
virtual void *ptr() const { return static_cast<void *>(ptr_.get()); }
virtual std::type_index type() const { return type_; }
virtual void set_type(std::type_index type) { type_ = type; }
/*! the pointer of memory block. */
std::unique_ptr<uint8_t, memory::PODDeleter<uint8_t>> ptr_;
/*! the size of memory block. */
size_t size_;
/* the current type of memory */
std::type_index type_;
};
/*! holds the memory block if allocated. */
std::shared_ptr<Placeholder> holder_;
/**
* @brief points to elements dimensions.
*
* @note dims_ do not indicate the memory block size.
*/
DDim dims_;
/**
* @brief the layout of memory block, default is NHWC.
*
* @note the memory allocation order, describe how weight/data is stored
* For example, in a 4-D Tensor (rank = 4), there are three commonly
* used layouts: NCHW, NHWC, and CHWN.
* N, C, H, W stand respectively for the batch size, the number of
* feature maps, the height, and the width.
*/
DataLayout layout_ = DataLayout::kNHWC;
/**
* @brief A PlaceHolder may be shared by more than one tensor.
*
* @note Some of them may be slices of the others. So the offset_
* is introduced here to indicate the byte offset between
* PlaceHolder::ptr_ and where the tensor data really begins.
*/
size_t offset_;
};
inline Tensor ReshapeToMatrix(const Tensor &src, int num_col_dims) {
Tensor res;
res.ShareDataWith(src);
res.Resize(flatten_to_2d(src.dims(), num_col_dims));
return res;
}
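// Usage sketch:
//   Tensor t;
//   t.Resize(make_ddim({2, 3, 4}));
//   float *p = t.mutable_data<float>(); // allocates 24 floats
//   Tensor row = t.Slice(0, 1);         // shares memory, dims {1, 3, 4}
//   Tensor mat = ReshapeToMatrix(t, 1); // dims {2, 12}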
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "tensor_util.h"
#include <algorithm>
#include <limits>
#include <vector>
namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor& src, Tensor* dst) {
// VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to
// "
// << dst_place;
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
auto src_ptr = src.data<void>();
auto dst_ptr = dst->mutable_data(src.type());
auto size = src.numel() * SizeOfType(src.type());
memory::Copy(dst_ptr, src_ptr, size);
}
void TensorCopySync(const Tensor& src, Tensor* dst) {
// VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
// << " to " << dst_place;
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
auto src_ptr = src.data<void>();
auto dst_ptr = dst->mutable_data(src.type());
auto size = src.numel() * SizeOfType(src.type());
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename Predicate>
struct AnyDTypeVisitor {
Predicate predicate_;
const Tensor& tensor_;
Tensor* out_;
AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, Tensor* out)
: predicate_(predicate), tensor_(tensor), out_(out) {}
template <typename T>
void operator()() const {
// auto t = EigenVector<T>::Flatten(tensor_);
// auto o = EigenScalar<bool>::From(*out_);
// returns true if predicate_(t) holds for any element.
// o.device(*ctx_.eigen_device()) = predicate_(t).any();
}
};
template <typename Predicate>
inline void AnyImpl(Predicate predicate, const Tensor& tensor,
framework::Tensor* out) {
VisitDataType(ToDataType(tensor.type()),
AnyDTypeVisitor<Predicate>(predicate, tensor, out));
}
template <typename Predicate>
struct AnyVisitor {
const framework::Tensor& tensor_;
Predicate predicate_;
AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
: tensor_(tensor), predicate_(std::move(predicate)) {}
bool operator()(void) const {
framework::Tensor out;
out.Resize({1});
out.mutable_data<bool>();
AnyImpl(predicate_, tensor_, &out);
return this->GetResult(out);
}
bool GetResult(const framework::Tensor& out) const {
return *out.data<bool>();
}
};
template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
AnyVisitor<Predicate> visitor(tensor, predicate);
// return platform::VisitPlace(visitor);
return visitor();
}
struct ContainsNANPredicate {
template <typename T>
auto operator()(const T& eigen_vec) const
-> decltype(std::declval<T>().isnan()) {
// Cast eigen_vector to a vector of bool; true where the element is NaN.
return eigen_vec.isnan();
}
};
bool TensorContainsNAN(const framework::Tensor& tensor) {
ContainsNANPredicate predicate;
return Any(tensor, predicate);
}
struct ContainsInfPredicate {
template <typename T>
auto operator()(const T& eigen_vec) const
-> decltype(std::declval<T>().isinf()) {
    // Cast eigen_vector to vector of bool. true if the element is Inf.
return eigen_vec.isinf();
}
};
bool TensorContainsInf(const framework::Tensor& tensor) {
ContainsInfPredicate predicate;
return Any(tensor, predicate);
}
void TensorToStream(std::ostream& os, const Tensor& tensor) {
{ // the 1st field, uint32_t version
constexpr uint32_t version = 0;
os.write(reinterpret_cast<const char*>(&version), sizeof(version));
}
{ // the 2nd field, tensor description
// int32_t size
// void* protobuf message
proto::VarType::TensorDesc desc;
desc.set_data_type(framework::ToDataType(tensor.type()));
auto dims = framework::vectorize(tensor.dims());
auto* pb_dims = desc.mutable_dims();
pb_dims->Resize(static_cast<int>(dims.size()), 0);
std::copy(dims.begin(), dims.end(), pb_dims->begin());
int32_t size = desc.ByteSize();
os.write(reinterpret_cast<const char*>(&size), sizeof(size));
auto out = desc.SerializeAsString();
os.write(out.data(), size);
}
{ // the 3rd field, tensor data
uint64_t size = tensor.memory_size();
auto* data_ptr = tensor.data<void>();
// PADDLE_ENFORCE(size < std::numeric_limits<std::streamsize>::max(),
// "Index overflow when writing tensor");
os.write(static_cast<const char*>(data_ptr),
static_cast<std::streamsize>(size));
}
}
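// The byte layout produced by TensorToStream (and consumed by TensorFromStream
// below) is, in order:
//   uint32_t version        -- currently always 0
//   int32_t  desc_size      -- byte size of the serialized TensorDesc proto
//   char[desc_size]         -- proto::VarType::TensorDesc (data type + dims)
//   char[memory_size]       -- raw tensor data, numel() * SizeOfType(type)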
struct DeserializedDataFunctor {
DeserializedDataFunctor(void** buf, Tensor* tensor)
: buf_(buf), tensor_(tensor) {}
template <typename T>
void operator()() {
*buf_ = tensor_->mutable_data<T>();
}
void** buf_;
Tensor* tensor_;
};
void TensorFromStream(std::istream& is, framework::Tensor* tensor) {
uint32_t version;
is.read(reinterpret_cast<char*>(&version), sizeof(version));
// PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
proto::VarType::TensorDesc desc;
{ // int32_t size
// proto buffer
int32_t size;
is.read(reinterpret_cast<char*>(&size), sizeof(size));
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char*>(buf.get()), size);
    // PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size),
    //                "Cannot parse tensor desc");
    desc.ParseFromArray(buf.get(), size);
}
{ // read tensor
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
tensor->Resize(framework::make_ddim(dims));
void* buf;
framework::VisitDataType(desc.data_type(),
DeserializedDataFunctor(&buf, tensor));
is.read(static_cast<char*>(buf), tensor->memory_size());
}
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "framework.pb.h"
#include "memory/t_malloc.h"
#include "platform/data_type.h"
#include "tensor.h"
namespace paddle_mobile {
namespace framework {
void TensorCopy(const Tensor& src, Tensor* dst);
void TensorCopySync(const Tensor& src, Tensor* dst);
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst);
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst);
bool TensorContainsNAN(const framework::Tensor& tensor);
bool TensorContainsInf(const framework::Tensor& tensor);
void TensorToStream(std::ostream& os, const Tensor& tensor);
void TensorFromStream(std::istream& is, Tensor* tensor);
//
// The implementation of template functions.
//
template <typename T>
void TensorFromVector(const std::vector<T>& src, Tensor* dst) {
auto src_ptr = static_cast<const void*>(src.data());
dst->Resize({static_cast<int64_t>(src.size())});
auto dst_ptr = static_cast<void*>(dst->mutable_data<T>());
auto size = src.size() * sizeof(T);
memory::Copy(dst_ptr, src_ptr, size);
}
template <typename T>
void TensorToVector(const Tensor& src, std::vector<T>* dst) {
auto src_ptr = static_cast<const void*>(src.data<T>());
auto size = src.numel() * sizeof(T);
dst->resize(src.numel());
auto dst_ptr = static_cast<void*>(dst->data());
memory::Copy(dst_ptr, src_ptr, size);
}
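// Usage sketch (illustrative, not part of the original file): round-tripping a
// host vector through a Tensor with the helpers above.
//
//   std::vector<float> src = {1.f, 2.f, 3.f};
//   framework::Tensor t;
//   framework::TensorFromVector(src, &t);  // t.dims() == {3}
//   std::vector<float> dst;
//   framework::TensorToVector(t, &dst);    // dst == src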
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "var_desc.h"
namespace paddle_mobile {
namespace framework {
VarDesc::VarDesc(const proto::VarDesc &desc) : desc_(desc) {}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework.pb.h"
#include "paddle_mobile_object.h"
namespace paddle_mobile {
namespace framework {
class VarDesc {
public:
VarDesc(const proto::VarDesc &desc);
std::string Name() const { return desc_.name(); }
proto::VarType::Type GetType() const { return desc_.type().type(); }
bool Persistable() const { return desc_.persistable(); }
const proto::VarType::ChannelDesc &channel_desc() const {
switch (desc_.type().type()) {
case proto::VarType::CHANNEL:
return desc_.type().channel();
      default:
        // Falling off the end without a return value is undefined behaviour;
        // callers must only invoke this on CHANNEL-typed variables.
        break;
}
}
const proto::VarType::TensorDesc &tensor_desc() const {
switch (desc_.type().type()) {
case proto::VarType::SELECTED_ROWS:
return desc_.type().selected_rows();
case proto::VarType::LOD_TENSOR:
return desc_.type().lod_tensor().tensor();
case proto::VarType::LOD_TENSOR_ARRAY:
return desc_.type().tensor_array().tensor();
      default:
        // Falling off the end without a return value is undefined behaviour;
        // callers must only invoke this on tensor-typed variables.
        break;
}
}
proto::VarType::Type GetDataType() const {
switch (desc_.type().type()) {
case proto::VarType::CHANNEL:
return channel_desc().data_type();
break;
default:
return tensor_desc().data_type();
}
}
template <typename T>
std::vector<T> RepeatedToVector(
const google::protobuf::RepeatedField<T> &repeated_field) const {
std::vector<T> ret;
ret.reserve(repeated_field.size());
std::copy(repeated_field.begin(), repeated_field.end(),
std::back_inserter(ret));
return ret;
}
std::vector<int64_t> GetShape() const {
return this->RepeatedToVector(tensor_desc().dims());
}
private:
proto::VarDesc desc_;
};
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework.pb.h"
#include "lod_tensor.h"
#include "selected_rows.h"
#include "variable.h"
namespace paddle_mobile {
namespace framework {
inline proto::VarType::Type ToVarType(std::type_index type) {
if (type.hash_code() == typeid(LoDTensor).hash_code()) {
return proto::VarType_Type_LOD_TENSOR;
} else if (type.hash_code() == typeid(SelectedRows).hash_code()) {
return proto::VarType_Type_SELECTED_ROWS;
  } else {
    // PADDLE_THROW("ToVarType:Unsupported type %s", type.name());
    // Reaching the end of this function without returning is undefined
    // behaviour; only LoDTensor and SelectedRows are supported here.
  }
}
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <iostream>
#include <memory>
#include <string>
#include <typeindex>
#include <typeinfo>
#include "paddle_mobile_object.h"
namespace paddle_mobile {
namespace framework {
class Variable : public PaddleMobileObject {
public:
Variable() {}
~Variable() {}
template <typename T>
const T* Get() const {
return static_cast<const T*>(holder_->Ptr());
}
bool IsInitialized() const { return holder_ != nullptr; }
const std::string* Name() { return name_; }
template <typename T>
T* GetMutable() {
    if (!IsType<T>()) {
      holder_.reset(new PlaceholderImp<T>(new T()));
    }
return static_cast<T*>(holder_->Ptr());
}
template <typename T>
bool IsType() const {
    // if (holder_) {
    //   printf(" holder type : %s, this type %s \n", holder_->Type().name(),
    //          typeid(T).name());
    // }
// std::cout << " " << holder_->Type() << " " << typeid(T) <<
// std::endl;
return holder_ != nullptr && holder_->Type() == typeid(T);
}
void Clear() { holder_.reset(); }
std::type_index Type() const { return holder_->Type(); }
void SetName(const std::string* name) { name_ = name; }
private:
struct Placeholder {
Placeholder() = default;
virtual ~Placeholder() = default;
virtual const std::type_info& Type() const = 0;
virtual void* Ptr() const = 0;
};
template <typename T>
struct PlaceholderImp : public Placeholder {
explicit PlaceholderImp(T* ptr) : ptr_(ptr), type_(typeid(T)) {}
    virtual const std::type_info& Type() const override { return type_; }
virtual void* Ptr() const override {
return static_cast<void*>(ptr_.get());
}
std::unique_ptr<T> ptr_;
const std::type_info& type_;
};
std::unique_ptr<Placeholder> holder_;
friend class Scope;
const std::string* name_;
};
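// Usage sketch (illustrative, not part of the original file): a Variable is an
// untyped slot whose concrete payload is created lazily on first GetMutable.
//
//   framework::Variable var;
//   auto* tensor = var.GetMutable<framework::LoDTensor>();  // allocates holder
//   bool ok = var.IsType<framework::LoDTensor>();           // true
//   var.Clear();                                            // drops the holder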
} // namespace framework
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include <fstream>
#include <iostream>
#include "framework/framework.pb.h"
#include "framework/lod_tensor.h"
#include "framework/program_desc.h"
#include "framework/scope.h"
#include "framework/tensor.h"
#include "io.h"
namespace paddle_mobile {
void ReadBinaryFile(const std::string &filename, std::string *contents) {
std::ifstream fin(filename, std::ios::in | std::ios::binary);
fin.seekg(0, std::ios::end);
contents->clear();
contents->resize(fin.tellg());
fin.seekg(0, std::ios::beg);
fin.read(&(contents->at(0)), contents->size());
fin.close();
}
template <typename Dtype, Precision P>
void Loader<Dtype, P>::LoadVar(framework::LoDTensor *tensor,
const std::string &file_path) {
// std::cout << " to load " << file_path << std::endl;
std::ifstream is(file_path);
std::streampos pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
// std::cout << " file length = " << is.tellg() << std::endl;
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// std::cout << " version: " << version << std::endl;
// 2 Lod information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
// std::cout << " load level: " << lod_level << std::endl;
// std::cout << " lod info: " << std::endl;
auto &lod = *tensor->mutable_lod();
lod.resize(lod_level);
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
for (int j = 0; j < tmp.size(); ++j) {
// std::cout << " lod - " << tmp[j] << std::endl;
}
lod[i] = tmp;
}
// 3. tensor version
uint32_t tensor_version;
is.read(reinterpret_cast<char *>(&tensor_version), sizeof(tensor_version));
// std::cout << " tensor_version: " << tensor_version << std::endl;
// 4. tensor desc
int32_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
// std::cout << " tensor desc size: " << size << std::endl;
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char *>(buf.get()), size);
framework::proto::VarType::TensorDesc desc;
desc.ParseFromArray(buf.get(), size);
// std::cout << " desc dims size " << desc.dims().size() << std::endl;
int memory_size = 1;
for (int l = 0; l < desc.dims().size(); ++l) {
// std::cout << " dim " << l << " value: " << desc.dims()[l] <<
// std::endl;
memory_size *= desc.dims()[l];
}
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
tensor->Resize(framework::make_ddim(dims));
  // Only the FP32 branch below materializes the tensor buffer; keep the
  // pointer null so an unsupported type cannot be read into a wild pointer.
  void *memory = nullptr;
  int type_size = 0;
// std::cout << " desc pre type: ";
switch (desc.data_type()) {
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
memory = tensor->mutable_data<float>();
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
}
// std::cout << " malloc size: " << memory_size * type_size << std::endl;
  if (memory != nullptr) {
    is.read(static_cast<char *>(memory), memory_size * type_size);
  }
  // std::cout << " memory: " << memory << std::endl;
  is.close();
}
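// LoadVar above parses the standard PaddlePaddle variable file layout:
//   uint32_t version
//   uint64_t lod_level, then per level: uint64_t byte_size + size_t[] offsets
//   uint32_t tensor_version
//   int32_t  desc_size + serialized proto::VarType::TensorDesc
//   raw tensor data (numel * element size)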
template <typename Dtype, Precision P>
const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
const std::string &dirname) {
std::string model_filename = dirname + "/__model__";
std::string program_desc_str;
ReadBinaryFile(model_filename, &program_desc_str);
framework::proto::ProgramDesc program_desc_proto;
program_desc_proto.ParseFromString(program_desc_str);
std::shared_ptr<framework::ProgramDesc> originProgramDesc =
std::make_shared<framework::ProgramDesc>(program_desc_proto);
framework::Program<Dtype, P> program;
program.originProgram = originProgramDesc;
std::shared_ptr<framework::Scope> scope =
std::make_shared<framework::Scope>();
program.scope = scope;
for (auto block : originProgramDesc->Blocks()) {
// std::cout << "for block" << std::endl;
for (int i = 0; i < block->Vars().size(); ++i) {
std::shared_ptr<framework::VarDesc> var_desc = block->Vars()[i];
auto var = scope->Var(var_desc->Name());
if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
if (var_desc->Persistable() &&
var_desc->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
var_desc->GetType() != framework::proto::VarType::FETCH_LIST) {
framework::LoDTensor *tensor =
var->GetMutable<framework::LoDTensor>();
// to load
LoadVar(tensor, dirname + "/" + var_desc->Name());
}
} else {
// std::cout << "非 lod" << std::endl;
}
}
}
#ifdef PADDLE_MOBILE_DEBUG
for (int i = 0; i < program_desc_proto.blocks().size(); ++i) {
framework::proto::BlockDesc block = program_desc_proto.blocks()[i];
// std::cout << "block: " << block.idx() << std::endl;
for (int j = 0; j < block.ops().size(); ++j) {
framework::proto::OpDesc op = block.ops()[j];
// std::cout << " op: " << op.type() << std::endl;
for (int m = 0; m < op.inputs_size(); ++m) {
const framework::proto::OpDesc::Var &var = op.inputs(m);
// std::cout << " input parameter: " << var.parameter() <<
// std::endl;
for (int n = 0; n < var.arguments().size(); ++n) {
// std::cout << " argument - " << var.arguments()[n] <<
// std::endl;
}
}
for (int y = 0; y < op.outputs_size(); ++y) {
const framework::proto::OpDesc::Var &var = op.outputs(y);
// std::cout << " output parameter: " << var.parameter() <<
// std::endl;
for (int z = 0; z < var.arguments().size(); ++z) {
// std::cout << " argument - " << var.arguments()[z] <<
// std::endl;
}
}
for (int x = 0; x < op.attrs().size(); ++x) {
const framework::proto::OpDesc_Attr attr = op.attrs()[x];
// std::cout << " attr name: " << attr.name() << std::endl;
// std::cout << " attr type: " << attr.type() << std::endl;
        switch (attr.type()) {
          case framework::proto::AttrType::BOOLEAN:
            // std::cout << "   boolean: " << attr.b() << std::endl;
            break;
          case framework::proto::AttrType::INT:
            // std::cout << "   int: " << attr.i() << std::endl;
            break;
          case framework::proto::AttrType::FLOAT:
            // std::cout << "   float: " << attr.f() << std::endl;
            break;
          case framework::proto::AttrType::STRING:
            // std::cout << "   string: " << attr.s() << std::endl;
            break;
          case framework::proto::AttrType::BOOLEANS:
            // std::vector<bool> bools(attr.bools_size());
            for (int y = 0; y < attr.bools_size(); ++y) {
              // std::cout << "   bool - " << attr.bools(y) << std::endl;
            }
            break;
          case framework::proto::AttrType::LONG:
            // std::cout << "   long: " << attr.l() << std::endl;
            break;
          case framework::proto::AttrType::FLOATS:
            for (int y = 0; y < attr.floats_size(); ++y) {
              // std::cout << "   float - " << y << ": " << attr.floats(y)
              //           << std::endl;
            }
            break;
          case framework::proto::AttrType::INTS:
            for (int y = 0; y < attr.ints_size(); ++y) {
              // std::cout << "   int - " << y << ": " << attr.ints(y)
              //           << std::endl;
            }
            break;
          case framework::proto::AttrType::STRINGS:
            for (int y = 0; y < attr.strings_size(); ++y) {
              // std::cout << "   string - " << y << ": " << attr.strings(y)
              //           << std::endl;
            }
            break;
          default:
            break;
        }
}
}
for (int k = 0; k < block.vars().size(); ++k) {
framework::proto::VarDesc var = block.vars()[k];
if (var.type().type() == framework::proto::VarType::LOD_TENSOR) {
// std::cout << " var name: " << var.name() << std::endl;
const framework::proto::VarType::TensorDesc &tensor_desc =
var.type().lod_tensor().tensor();
// std::cout << " in var tensor desc dims size "
// << tensor_desc.dims().size() << std::endl;
int memory_size = 1;
for (int l = 0; l < tensor_desc.dims().size(); ++l) {
// std::cout << " var tensor desc dim " << l
// << " value: " << tensor_desc.dims()[l] <<
// std::endl;
}
}
if (var.persistable() &&
var.type().type() != framework::proto::VarType::FEED_MINIBATCH &&
var.type().type() != framework::proto::VarType::FETCH_LIST) {
// std::cout << " to load " << var.name() << std::endl;
std::string file_path = dirname + "/" + var.name();
std::ifstream is(file_path);
std::streampos pos = is.tellg(); // save current position
is.seekg(0, std::ios::end);
// std::cout << " file length = " << is.tellg() << std::endl;
is.seekg(pos); // restore saved position
// 1. version
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
// std::cout << " version: " << version << std::endl;
// 2 Lod information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
// std::cout << " load level: " << lod_level << std::endl;
// std::cout << " lod info: " << std::endl;
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
for (int j = 0; j < tmp.size(); ++j) {
// std::cout << " lod - " << tmp[j] << std::endl;
}
}
uint32_t tensor_version;
        is.read(reinterpret_cast<char *>(&tensor_version),
                sizeof(tensor_version));
// std::cout << " tensor_version: " << tensor_version <<
// std::endl;
int32_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
// std::cout << " tensor desc size: " << size << std::endl;
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char *>(buf.get()), size);
framework::proto::VarType::TensorDesc desc;
desc.ParseFromArray(buf.get(), size);
// std::cout << " desc dims size " << desc.dims().size() <<
// std::endl;
int memory_size = 1;
for (int l = 0; l < desc.dims().size(); ++l) {
// std::cout << " dim " << l << " value: " <<
// desc.dims()[l]
// << std::endl;
memory_size *= desc.dims()[l];
}
int type_size = 0;
// std::cout << " desc pre type: ";
switch (desc.data_type()) {
case framework::proto::VarType::FP16:
// std::cout << "FP16" << std::endl;
type_size = 2;
break;
case framework::proto::VarType::FP32:
type_size = 4;
// std::cout << "FP32" << std::endl;
break;
case framework::proto::VarType::FP64:
type_size = 8;
// std::cout << "FP64" << std::endl;
break;
case framework::proto::VarType::INT32:
type_size = 4;
// std::cout << "INT32" << std::endl;
break;
case framework::proto::VarType::INT64:
type_size = 8;
// std::cout << "INT64" << std::endl;
break;
case framework::proto::VarType::BOOL:
type_size = 1;
// std::cout << "BOOL" << std::endl;
break;
default:
break;
// std::cout << " not support" << std::endl;
}
// std::cout << " malloc size: " << memory_size * type_size
// << std::endl;
        void *memory = malloc(memory_size * type_size);
        is.read(static_cast<char *>(memory), memory_size * type_size);
        // std::cout << " memory: " << memory << std::endl;
        free(memory);  // debug-only read; release the buffer to avoid a leak
        is.close();
} else {
// std::cout << " *not load "
// << " var : " << var.name() << std::endl;
}
}
}
#endif
return program;
}
template class Loader<CPU, Precision::FP32>;
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <string>
#include "common/types.h"
#include "framework/lod_tensor.h"
#include "framework/paddle_mobile_object.h"
#include "framework/program.h"
namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class Loader : PaddleMobileObject {
public:
const framework::Program<Dtype, P> Load(const std::string &dirname);
private:
void LoadVar(framework::LoDTensor *tensor, const std::string &file_path);
};
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "t_malloc.h"
#include <cstdlib>
#include <cstring>
namespace paddle_mobile {
namespace memory {
const int MALLOC_ALIGN = 16;
void Copy(void *dst, const void *src, size_t num) {
std::memcpy(dst, src, num);
}
void *Alloc(size_t size) {
size_t offset = sizeof(void *) + MALLOC_ALIGN - 1;
char *p = static_cast<char *>(malloc(offset + size));
if (!p) {
return nullptr;
}
void *r = reinterpret_cast<void *>(reinterpret_cast<size_t>(p + offset) &
(~(MALLOC_ALIGN - 1)));
static_cast<void **>(r)[-1] = p;
return r;
}
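// How the aligned allocation above is laid out (illustrative): Alloc reserves
// sizeof(void*) + MALLOC_ALIGN - 1 extra bytes, rounds the returned pointer r
// up to a MALLOC_ALIGN (16-byte) boundary, and stashes the original malloc
// pointer p in the word just before r, so Free below can recover it via
// static_cast<void **>(ptr)[-1].
//
//   p                         r (16-byte aligned)
//   |---- padding ----| p |--------- user data (size bytes) ---------|
//        (the pointer-sized slot immediately before r holds p)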
void Free(void *ptr) {
if (ptr) {
free(static_cast<void **>(ptr)[-1]);
}
}
} // namespace memory
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <cstddef>
#include <type_traits>
namespace paddle_mobile {
namespace memory {
void Copy(void *dst, const void *src, size_t num);
void *Alloc(size_t size);
void Free(void *ptr);
/**
 * \brief Deleter that frees a memory block allocated by Alloc (POD types).
 *
 * \note In some cases, a custom deleter is used to deallocate the memory
 *       automatically for std::unique_ptr<T> in tensor.h. Uses static_cast
 *       to convert T* back to void*.
 */
template <typename T>
class PODDeleter {
static_assert(std::is_pod<T>::value, "T must be POD");
public:
  PODDeleter() = default;
void operator()(T *ptr) { Free(static_cast<void *>(ptr)); }
};
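// Usage sketch (illustrative, not part of the original file): pairing Alloc
// with PODDeleter so a unique_ptr releases the block through memory::Free.
//
//   std::unique_ptr<float, PODDeleter<float>> buf(
//       static_cast<float *>(Alloc(128 * sizeof(float))), PODDeleter<float>());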
/**
 * \brief Deleter that frees a memory block for types that are not POD.
 *
 * \note In some cases, a custom deleter is used to deallocate the memory
 *       automatically for std::unique_ptr<T> in tensor.h. Uses
 *       reinterpret_cast to convert T* back to void*.
 */
template <typename T>
class PlainDeleter {
public:
  PlainDeleter() = default;
void operator()(T *ptr) { Free(reinterpret_cast<void *>(ptr)); }
};
} // namespace memory
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "conv_op.h"
#include "framework/data_type.h"
#include "framework/op_proto_maker.h"
#include "framework/operator.h"
namespace paddle_mobile {
namespace operators {
int ConvOutputSize(int input_size, int filter_size, int dilation, int padding,
int stride) {
const int dkernel = dilation * (filter_size - 1) + 1;
int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
return output_size;
}
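// Worked example: a 224-wide input with a 3x3 filter, dilation 1, padding 1,
// stride 1 gives dkernel = 1 * (3 - 1) + 1 = 3 and
// output_size = (224 + 2 * 1 - 3) / 1 + 1 = 224, i.e. a "same" convolution.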
template <typename Dtype, typename T>
void ConvOp<Dtype, T>::InferShape() const {
// std::cout << " begin get dims: " << std::endl;
auto in_dims = param_.Input()->dims();
// std::cout << " end get in dims: " << std::endl;
// std::cout << " in_dims: " << in_dims << std::endl;
// std::cout << " begin get Filter " << std::endl;
auto filter_dims = param_.Filter()->dims();
// std::cout << " end get Filter " << std::endl;
// std::cout << " begin get Attrs " << std::endl;
const std::vector<int> &strides = param_.Strides();
// std::cout << " end get Attrs " << strides[0] << std::endl;
std::vector<int> paddings = param_.Paddings();
int groups = param_.Groups();
std::vector<int> dilations = param_.Dilations();
std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
dilations[i], paddings[i],
strides[i]));
}
framework::DDim ddim = framework::make_ddim(output_shape);
param_.Output()->Resize(ddim);
}
template class ConvOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework/operator.h"
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class ConvOp : public framework::OperatorWithKernel<DeviceType> {
public:
ConvOp(const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const framework::AttributeMap& attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
protected:
void RunImpl() const {
operators::ConvKernel<DeviceType, T, ConvParam> kernel;
kernel.Compute(param_);
}
ConvParam param_;
};
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile {
namespace operators {
bool IsExpand(const std::vector<int64_t>& filter_dim,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::vector<int>& dilations) {
bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
for (size_t j = 0; j < strides.size(); ++j) {
filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
strides_1 = strides_1 && (strides[j] == 1);
padding_0 = padding_0 && (paddings[j] == 0);
dilation_1 = dilation_1 && (dilations[j] == 1);
}
return !(filter_1 && strides_1 && padding_0 && dilation_1);
}
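// Example: a 1x1 filter with stride 1, padding 0 and dilation 1 maps each
// input pixel to exactly one output pixel, so no im2col/vol2col expansion is
// needed and IsExpand returns false; any other configuration returns true.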
template <>
void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam& param) const {
const Tensor* input = param.Input();
std::cout << " conv param " << param << std::endl;
// The filter will be reshaped in the calculations,
// so here use an assignment operation,
// that avoids modifying the variable in the Scope.
Tensor filter = *param.Filter();
Tensor* output = param.Output();
// output->mutable_data<T>(context.GetPlace());
int groups = param.Groups();
std::vector<int> strides = param.Strides();
std::vector<int> paddings = param.Paddings();
std::vector<int> dilations = param.Dilations();
std::cout << " compute end get Attrs " << strides[0] << std::endl;
const int batch_size = static_cast<int>(input->dims()[0]);
// filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims()));
// output_shape_vec: {o_n, o_c, o_h, o_w} or {o_n, o_c, o_d, o_h, o_w}
std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims()));
// use col_shape in the im2col calculation
// col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
// o_h, o_w}
size_t data_dim = filter_shape_vec.size() - 2;
std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
col_shape_vec[0] = input->dims()[1] / groups;
for (size_t j = 0; j < data_dim; ++j) {
col_shape_vec[j + 1] = filter_shape_vec[j + 2];
col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
}
framework::DDim col_shape(framework::make_ddim(col_shape_vec));
// use col_matrix_shape in the gemm calculation
// size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
// o_h * o_w)
framework::DDim col_matrix_shape =
framework::flatten_to_2d(col_shape, data_dim + 1);
bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
Tensor col;
// col_matrix shares the same piece of data with col,
// but will be reshaped into a two-dimensional matrix shape
// to call the matrix multiplication interface.
Tensor col_matrix;
if (is_expand) {
col.mutable_data<float>(col_shape);
col_matrix.ShareDataWith(col);
col_matrix.Resize(col_matrix_shape);
}
framework::DDim input_shape = framework::slice_ddim(
input->dims(), 1, static_cast<int>(input->dims().size()));
framework::DDim filter_matrix_shape = {filter.dims()[0],
filter.numel() / filter.dims()[0]};
filter.Resize(filter_matrix_shape);
std::cout << " input dim " << input->dims() << std::endl;
std::cout << " output dim " << output->dims() << std::endl;
framework::DDim output_matrix_shape = {
output->dims()[1],
output->numel() / (output->dims()[0] * output->dims()[1])};
// convolution operator: im2col(or vol2col) + gemm
int in_step = static_cast<int>(input->dims()[1]) / groups;
int out_step = static_cast<int>(output->dims()[1]) / groups;
math::Vol2ColFunctor<CPU, float> vol2col;
math::Im2ColFunctor<math::ColFormat::kCFO, CPU, float> im2col;
// auto& dev_ctx = context.template
// device_context<DeviceContext>();
for (int i = 0; i < batch_size; i++) {
Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape);
for (int g = 0; g < groups; g++) {
Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step);
if (!is_expand) {
col.ShareDataWith(in_slice);
col_matrix.ShareDataWith(col);
col_matrix.Resize(col_matrix_shape);
} else if (data_dim == 2U) {
// im2col
im2col(in_slice, dilations, strides,
std::vector<int>{paddings[0], paddings[1], paddings[0],
paddings[1]},
&col);
} else if (data_dim == 3U) {
// vol2col
vol2col(in_slice, dilations, strides, paddings, &col);
}
// gemm
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul<float>(filter_slice, false, col_matrix, false, float(1.0),
&out_slice, float(0.0));
}
}
}
template class ConvKernel<CPU, float, ConvParam>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/operator.h"
#include "operators/math/im2col.h"
#include "operators/math/math_function.h"
#include "operators/math/vol2col.h"
#include "operators/op_param.h"
#pragma once;
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T, typename P>
class ConvKernel : public framework::OpKernelBase<DeviceType, ConvParam> {
public:
void Compute(const ConvParam &param) const;
};
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "operators/kernel/conv_kernel.h"
namespace paddle_mobile {
namespace operators {
// template<>
// void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const {}
//
// template class ConvKernel<FPGA, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "im2col.h"
#include "common/types.h"
namespace paddle_mobile {
namespace operators {
namespace math {
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Im2ColFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
// PADDLE_ENFORCE(col->dims().size() == 5);
int im_channels = im.dims()[0];
int im_height = im.dims()[1];
int im_width = im.dims()[2];
int filter_height = col->dims()[1];
int filter_width = col->dims()[2];
int col_height = col->dims()[3];
int col_width = col->dims()[4];
// PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] -
// ((dilation[0] * (filter_height - 1) + 1))) /
// stride[0] +
// 1,
// col_height,
// "Output_height and padding(padding_up, padding_down)
// are " "inconsistent.");
// PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] -
// ((dilation[1] * (filter_width - 1) + 1))) /
// stride[1] +
// 1,
// col_width,
// "Output_height and padding(padding_up, padding_down)
// are " "inconsistent.");
int channels_col = im_channels * filter_height * filter_width;
const T* im_data = im.data<T>();
T* col_data = col->data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int c_im = c / (filter_width * filter_height);
for (int h = 0; h < col_height; ++h) {
int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
for (int w = 0; w < col_width; ++w) {
int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
int col_idx = (c * col_height + h) * col_width + w;
int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx;
col_data[col_idx] = (im_row_idx < 0 || im_row_idx >= im_height ||
im_col_idx < 0 || im_col_idx >= im_width)
? static_cast<T>(0)
: im_data[im_idx];
}
}
}
}
};
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Col2ImFunctor<ColFormat::kCFO, CPU, T> {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im) {
// PADDLE_ENFORCE(im->dims().size() == 3);
// PADDLE_ENFORCE(col.dims().size() == 5);
int im_channels = im->dims()[0];
int im_height = im->dims()[1];
int im_width = im->dims()[2];
int filter_height = col.dims()[1];
int filter_width = col.dims()[2];
int col_height = col.dims()[3];
int col_width = col.dims()[4];
// PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] -
// ((dilation[0] * (filter_height - 1) + 1))) /
// stride[0] +
// 1,
// col_height,
// "Output_height and padding(padding_up, padding_down)
// are " "inconsistent.");
// PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] -
// ((dilation[1] * (filter_width - 1) + 1))) /
// stride[1] +
// 1,
// col_width,
// "Output_height and padding(padding_up, padding_down)
// are " "inconsistent.");
int channels_col = im_channels * filter_height * filter_width;
T* im_data = im->data<T>();
const T* col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int c_im = c / (filter_width * filter_height);
for (int h = 0; h < col_height; ++h) {
int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0];
for (int w = 0; w < col_width; ++w) {
int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1];
if ((im_row_idx) >= 0 && (im_row_idx) < im_height &&
(im_col_idx) >= 0 && (im_col_idx) < im_width) {
im_data[(im_row_idx + c_im * im_height) * im_width + im_col_idx] +=
col_data[(c * col_height + h) * col_width + w];
}
}
}
}
}
};
template class Im2ColFunctor<ColFormat::kCFO, CPU, float>;
template class Im2ColFunctor<ColFormat::kCFO, CPU, double>;
template class Col2ImFunctor<ColFormat::kCFO, CPU, float>;
template class Col2ImFunctor<ColFormat::kCFO, CPU, double>;
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Im2ColFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col) {
// PADDLE_ENFORCE(im.dims().size() == 3);
// PADDLE_ENFORCE(col->dims().size() == 5);
int im_channels = im.dims()[0];
int im_height = im.dims()[1];
int im_width = im.dims()[2];
int filter_height = col->dims()[3];
int filter_width = col->dims()[4];
int col_height = col->dims()[0];
int col_width = col->dims()[1];
// PADDLE_ENFORCE_EQ(
// (im_height + padding[0] + padding[2] - filter_height) / stride[0]
// + 1, col_height, "Output_height and padding(padding_up,
// padding_down) are " "inconsistent.");
// PADDLE_ENFORCE_EQ(
// (im_width + padding[1] + padding[3] - filter_width) / stride[1] +
// 1, col_width, "col_width and padding(padding_left, padding_right)
// are " "inconsistent.");
const T* im_data = im.data<T>();
T* col_data = col->data<T>();
for (int col_row_idx = 0; col_row_idx < col_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < col_width; ++col_col_idx) {
for (int channel = 0; channel < im_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
int im_row_offset =
col_row_idx * stride[0] + filter_row_idx - padding[0];
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_col_offset =
col_col_idx * stride[1] + filter_col_idx - padding[1];
int col_offset =
((((col_row_idx)*col_width + col_col_idx) * im_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
int im_offset = (channel * im_height + im_row_offset) * im_width +
im_col_offset;
col_data[col_offset] =
(im_row_offset < 0 || im_row_offset >= im_height ||
im_col_offset < 0 || im_col_offset >= im_width)
? static_cast<T>(0)
: im_data[im_offset];
}
}
}
}
}
}
};
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Col2ImFunctor<ColFormat::kOCF, CPU, T> {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im) {
// PADDLE_ENFORCE(im->dims().size() == 3);
// PADDLE_ENFORCE(col.dims().size() == 5);
int im_channels = im->dims()[0];
int im_height = im->dims()[1];
int im_width = im->dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int col_height = col.dims()[0];
int col_width = col.dims()[1];
// PADDLE_ENFORCE_EQ(
// (im_height + padding[0] + padding[2] - filter_height) / stride[0]
// + 1, col_height, "Output_height and padding(padding_up,
// padding_down) are " "inconsistent.");
// PADDLE_ENFORCE_EQ(
// (im_width + padding[1] + padding[3] - filter_width) / stride[1] +
// 1, col_width, "col_width and padding(padding_left, padding_right)
// are " "inconsistent.");
T* im_data = im->data<T>();
const T* col_data = col.data<T>();
for (int col_row_idx = 0; col_row_idx < col_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < col_width; ++col_col_idx) {
for (int channel = 0; channel < im_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
int im_row_offset =
col_row_idx * stride[0] + filter_row_idx - padding[0];
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_col_offset =
col_col_idx * stride[1] + filter_col_idx - padding[1];
int col_offset =
(((col_row_idx * col_width + col_col_idx) * im_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
if (im_row_offset >= 0 && im_row_offset < im_height &&
im_col_offset >= 0 && im_col_offset < im_width) {
int im_offset =
(channel * im_height + im_row_offset) * im_width +
im_col_offset;
im_data[im_offset] += col_data[col_offset];
}
}
}
}
}
}
}
};
template class Im2ColFunctor<ColFormat::kOCF, CPU, float>;
template class Im2ColFunctor<ColFormat::kOCF, CPU, double>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, float>;
template class Col2ImFunctor<ColFormat::kOCF, CPU, double>;
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "framework/tensor.h"
namespace paddle_mobile {
namespace operators {
namespace math {
/* The storage format of the coldata in the Im2ColFunctor and Col2ImFunctor. */
enum class ColFormat { kCFO = 0, kOCF = 1 };
/*
 * \brief Converts the image data of three dimensions (CHW) into a colData of
 *        five dimensions in the Im2ColFunctor calculation; the Col2ImFunctor
 *        calculation reverses the transform.
*
* \param imData Image data.
* \param imShape The shape of imData,
* [input_channels, input_height, input_width].
* \param colData Column data.
* \param colShape The shape of colData.
*
 * \param dilations Dilation data, 2-dimension
 *                  [dilation_height, dilation_width].
 *
 * \param strides   Stride data, 2-dimension [stride_height, stride_width].
 *
 * \param paddings  Padding data, 4-dimension
 *                  [up_pad, left_pad, down_pad, right_pad].
*
* If the template argument Format is kCFO, the shape of colData is:
* [input_channels, filter_height, filter_width, output_height, output_width]
* So, it is easy to reshape into a convolution matrix for convolution
* calculation based on matrix multiplication.
 * The shape of the convolution matrix is [height, width], where the height is
 * equal to input_channels * filter_height * filter_width, and the width is
 * equal to output_height * output_width.
*
* Reshape:
* shape of colData shape of convolution matrix
* [input_channels,
* filter_height,
* filter_width, ======> [height, width]
* output_height,
* output_width]
*
* If the template argument Format is kOCF, the shape of colData is:
* [output_height, output_width, input_channels, filter_height, filter_width]
* So, it is easy to reshape into a sequence matrix for rnn calculation.
 * The shape of the sequence matrix is [seq_length, step_size], where
 * seq_length is equal to output_height * output_width, and step_size is equal
 * to input_channels * filter_height * filter_width.
*
* Reshape:
* shape of colData shape of sequence matrix
* [output_height,
* output_width,
* input_channels, ======> [seqLength, stepSize]
* filter_height,
* filter_width]
*
* \note The caller needs to ensure that imShape.inputChannels is equal to
* colShape.inputChannels.
*/
template <ColFormat Format, typename DeviceType, typename T>
class Im2ColFunctor {
public:
void operator()(const framework::Tensor& im, const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* col);
};
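// Worked shape example for the kCFO format: an im of [3, 32, 32] with a 3x3
// filter, stride 1, padding 0 and dilation 1 produces a col of
// [3, 3, 3, 30, 30]; flattened to 2-D it is a [3 * 3 * 3, 30 * 30] = [27, 900]
// convolution matrix ready for gemm.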
template <ColFormat Format, typename DeviceType, typename T>
class Col2ImFunctor {
public:
void operator()(const framework::Tensor& col,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& padding, framework::Tensor* im);
};
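/*
 * Illustrative usage sketch (an assumed example, not part of the API
 * surface): a 3 x 32 x 32 image with a 3 x 3 filter, unit stride, no
 * padding and unit dilation yields a 30 x 30 output, so for kCFO the
 * caller allocates col as [3, 3, 3, 30, 30]:
 *
 * \code
 *   framework::Tensor im;   // shape [3, 32, 32], data filled by caller
 *   framework::Tensor col;  // shape [3, 3, 3, 30, 30], allocated by caller
 *   std::vector<int> dilation{1, 1};
 *   std::vector<int> stride{1, 1};
 *   std::vector<int> padding{0, 0, 0, 0};  // [up, left, down, right]
 *   Im2ColFunctor<ColFormat::kCFO, CPU, float> im2col;
 *   im2col(im, dilation, stride, padding, &col);
 * \endcode
 */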
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "math_function.h"
namespace paddle_mobile {
namespace operators {
namespace math {
template <>
void gemm<float>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const float alpha,
const float* A, const float* B, const float beta, float* C) {
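  // Row-major leading dimensions: a non-transposed operand is stored with
  // its logical shape (A: M x K, B: K x N, C: M x N), so its leading
  // dimension is its column count; a transposed operand is stored with the
  // shape flipped, so the leading dimension flips as well.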
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cblas_sgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
void gemm<double>(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const double alpha,
const double* A, const double* B, const double beta,
double* C) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
cblas_dgemm(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
void gemm<float>(const bool transA, const bool transB, const int M, const int N,
const int K, const float alpha, const float* A, const int lda,
const float* B, const int ldb, const float beta, float* C,
const int ldc) {
  cblas_sgemm(CblasRowMajor, transA ? CblasTrans : CblasNoTrans,
              transB ? CblasTrans : CblasNoTrans, M, N, K, alpha, A, lda, B,
              ldb, beta, C, ldc);
}
template <>
void gemm<double>(const bool transA, const bool transB, const int M,
const int N, const int K, const double alpha, const double* A,
const int lda, const double* B, const int ldb,
const double beta, double* C, const int ldc) {
  cblas_dgemm(CblasRowMajor, transA ? CblasTrans : CblasNoTrans,
              transB ? CblasTrans : CblasNoTrans, M, N, K, alpha, A, lda, B,
              ldb, beta, C, ldc);
}
template <>
void matmul<float>(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
// PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() ==
// 2,
// "The input and output of matmul be matrix");
//
// PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
// platform::is_cpu_place(matrix_b.place()) &&
// platform::is_cpu_place(matrix_out->place()),
// "Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
  int K = trans_a ? dim_a[0] : dim_a[1];
  CBLAS_TRANSPOSE transA = trans_a ? CblasTrans : CblasNoTrans;
  CBLAS_TRANSPOSE transB = trans_b ? CblasTrans : CblasNoTrans;
gemm<float>(transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<double>(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b,
double alpha, framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
// PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() ==
// 2,
// "The input and output of matmul be matrix");
//
// PADDLE_ENFORCE(platform::is_cpu_place(matrix_a.place()) &&
// platform::is_cpu_place(matrix_b.place()) &&
// platform::is_cpu_place(matrix_out->place()),
// "Matrix must all be in CPUPlace");
int M = dim_out[0];
int N = dim_out[1];
  int K = trans_a ? dim_a[0] : dim_a[1];
  CBLAS_TRANSPOSE transA = trans_a ? CblasTrans : CblasNoTrans;
  CBLAS_TRANSPOSE transB = trans_b ? CblasTrans : CblasNoTrans;
gemm<double>(transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cblas.h>
#include <cmath>
#include "framework/tensor.h"
namespace paddle_mobile {
namespace operators {
namespace math {
template <typename T>
void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const T alpha, const T* A,
const T* B, const T beta, T* C);
template <typename T>
void gemm(const bool transA, const bool transB, const int M, const int N,
const int K, const T alpha, const T* A, const int lda, const T* B,
const int ldb, const T beta, T* C, const int ldc);
// matrix multiply with continuous memory
template <typename T>
void matmul(const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, T alpha,
framework::Tensor* matrix_out, T beta);
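// Illustrative usage sketch (an assumed example; matmul expects 2-D tensors
// and matrix_out must be pre-allocated with the result shape):
//
//   framework::Tensor a;    // shape [2, 3], data filled by caller
//   framework::Tensor b;    // shape [3, 2], data filled by caller
//   framework::Tensor out;  // shape [2, 2], allocated by caller
//   // out = 1.0f * a * b + 0.0f * out
//   matmul<float>(a, false, b, false, 1.0f, &out, 0.0f);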
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "vol2col.h"
namespace paddle_mobile {
namespace operators {
namespace math {
using Tensor = paddle_mobile::framework::Tensor;
/*
* vol = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <typename T>
class Vol2ColFunctor<CPU, T> {
public:
void operator()(const Tensor& vol, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* col) const {
// PADDLE_ENFORCE(vol.dims().size() == 4);
// PADDLE_ENFORCE(col->dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col->dims()[1];
int filter_height = col->dims()[2];
int filter_width = col->dims()[3];
int output_depth = col->dims()[4];
int output_height = col->dims()[5];
int output_width = col->dims()[6];
int channels_col =
input_channels * filter_depth * filter_height * filter_width;
// PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
// ((dilations[0] * (filter_depth - 1) + 1))) /
// strides[0] +
// 1,
// output_depth,
// "input_depth and output_depth are "
// "mismatching.");
// PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
// ((dilations[1] * (filter_height - 1) + 1))) /
// strides[1] +
// 1,
// output_height,
// "input_height and output_height are "
// "mismatching.");
// PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
// ((dilations[2] * (filter_width - 1) + 1))) /
// strides[2] +
// 1,
// output_width,
// "input_width and output_width are "
// "mismatching.");
const T* vol_data = vol.data<T>();
T* col_data = col->data<T>();
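    // Each column-buffer channel index c encodes an (input channel, depth,
    // height, width) offset inside the filter volume; the modulo/division
    // chain below unpacks it.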
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int d_offset = (c / filter_width / filter_height) % filter_depth;
int c_in = c / filter_width / filter_height / filter_depth;
for (int d = 0; d < output_depth; ++d) {
int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
for (int h = 0; h < output_height; ++h) {
int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
for (int w = 0; w < output_width; ++w) {
int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
int col_idx =
((c * output_depth + d) * output_height + h) * output_width + w;
int vol_idx =
((c_in * input_depth + d_pad) * input_height + h_pad) *
input_width +
w_pad;
col_data[col_idx] =
(h_pad < 0 || h_pad >= input_height || w_pad < 0 ||
w_pad >= input_width || d_pad < 0 || d_pad >= input_depth)
? static_cast<T>(0)
: vol_data[vol_idx];
}
}
}
}
}
};
/*
 * vol = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <typename T>
class Col2VolFunctor<CPU, T> {
public:
void operator()(const Tensor& col, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* vol) const {
// PADDLE_ENFORCE(vol->dims().size() == 4);
// PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol->dims()[0];
int input_depth = vol->dims()[1];
int input_height = vol->dims()[2];
int input_width = vol->dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
int channels_col =
input_channels * filter_depth * filter_height * filter_width;
// PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
// ((dilations[0] * (filter_depth - 1) + 1))) /
// strides[0] +
// 1,
// output_depth,
// "input_depth and output_depth are "
// "mismatching.");
// PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
// ((dilations[1] * (filter_height - 1) + 1))) /
// strides[1] +
// 1,
// output_height,
// "input_height and output_height are "
// "mismatching.");
// PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
// ((dilations[2] * (filter_width - 1) + 1))) /
// strides[2] +
// 1,
// output_width,
// "input_width and output_width are "
// "mismatching.");
T* vol_data = vol->data<T>();
const T* col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int d_offset = (c / filter_width / filter_height) % filter_depth;
int cIm = c / filter_width / filter_height / filter_depth;
for (int d = 0; d < output_depth; ++d) {
int d_pad = d * strides[0] - paddings[0] + d_offset * dilations[0];
for (int h = 0; h < output_height; ++h) {
int h_pad = h * strides[1] - paddings[1] + h_offset * dilations[1];
for (int w = 0; w < output_width; ++w) {
int w_pad = w * strides[2] - paddings[2] + w_offset * dilations[2];
if (h_pad >= 0 && h_pad < input_height && w_pad >= 0 &&
w_pad < input_width && d_pad >= 0 && d_pad < input_depth) {
int vol_idx =
((cIm * input_depth + d_pad) * input_height + h_pad) *
input_width +
w_pad;
int col_idx =
((c * output_depth + d) * output_height + h) * output_width +
w;
vol_data[vol_idx] += col_data[col_idx];
}
}
}
}
}
}
};
template class Vol2ColFunctor<CPU, float>;
template class Vol2ColFunctor<CPU, double>;
template class Col2VolFunctor<CPU, float>;
template class Col2VolFunctor<CPU, double>;
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "common/types.h"
#include "framework/tensor.h"
namespace paddle_mobile {
namespace operators {
namespace math {
/*
 * \brief Converts four-dimensional feature data (CDHW) into
 * seven-dimensional colData in the Vol2ColFunctor calculation;
 * the Col2VolFunctor calculation reverses the conversion.
*
* \param volData Vol data.
* \param volShape The shape of volData,
* [input_channels, input_depth, input_height, input_width].
* \param colData Column data.
* \param colShape The shape of colData.
*
 * \param dilations dilation data,
 *        3-dimension [dilation_depth, dilation_height, dilation_width].
 *
 * \param strides stride data,
 *        3-dimension [stride_depth, stride_height, stride_width].
 *
 * \param paddings padding data,
 *        3-dimension [d_pad, h_pad, w_pad].
*
* The shape of colData is:
* [input_channels, filter_depth, filter_height, filter_width, output_depth,
* output_height, output_width]
* So, it is easy to reshape into a convolution matrix for convolution
* calculation based on matrix multiplication.
 * The shape of the convolution matrix is [height, width], where the height
 * equals input_channels * filter_depth * filter_height * filter_width, and
 * the width equals output_depth * output_height * output_width.
*
* Reshape:
* shape of colData shape of convolution matrix
* [input_channels,
* filter_depth,
* filter_height,
* filter_width, ======> [height, width]
* output_depth,
* output_height,
* output_width]
*
* \note The caller needs to ensure that volShape.inputChannels is equal to
* colShape.inputChannels.
*/
using Tensor = paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T>
class Vol2ColFunctor {
public:
void operator()(const Tensor& vol, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* col) const;
};
template <typename DeviceType, typename T>
class Col2VolFunctor {
public:
void operator()(const Tensor& col, const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings, Tensor* vol) const;
};
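/*
 * Illustrative usage sketch (an assumed example, not part of the API
 * surface): a 3 x 8 x 32 x 32 volume with a 3 x 3 x 3 filter, unit
 * strides, no padding and unit dilations yields a 6 x 30 x 30 output,
 * so the caller allocates col as [3, 3, 3, 3, 6, 30, 30]:
 *
 * \code
 *   Tensor vol;  // shape [3, 8, 32, 32], data filled by caller
 *   Tensor col;  // shape [3, 3, 3, 3, 6, 30, 30], allocated by caller
 *   std::vector<int> dilations{1, 1, 1};
 *   std::vector<int> strides{1, 1, 1};
 *   std::vector<int> paddings{0, 0, 0};  // [d_pad, h_pad, w_pad]
 *   Vol2ColFunctor<CPU, float> vol2col;
 *   vol2col(vol, dilations, strides, paddings, &col);
 * \endcode
 */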
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "op_param.h"
namespace paddle_mobile {
namespace operators {
std::ostream& operator<<(std::ostream& os, const ConvParam& conv_param) {
os << "parameter of conv: " << std::endl;
os << " stride: "
<< " (" << conv_param.Strides()[0] << conv_param.Strides()[1] << ") "
<< std::endl;
os << " paddings: "
<< " (" << conv_param.Paddings()[0] << conv_param.Paddings()[1] << ") "
<< std::endl;
os << " dilations: "
<< " (" << conv_param.Dilations()[0] << conv_param.Dilations()[1] << ") "
<< std::endl;
os << " groups: " << conv_param.Groups() << std::endl;
os << " input dims: " << conv_param.Input()->dims() << std::endl;
os << " filter dims: " << conv_param.Filter()->dims() << std::endl;
os << " output dims: " << conv_param.Output()->dims() << std::endl;
return os;
}
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/type_define.h"
#include "framework/lod_tensor.h"
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
class OpParam : PaddleMobileObject {
 protected:
template <typename T>
static T *InputFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Input", inputs, scope);
}
template <typename T>
static T *OutputFrom(const VariableNameMap &outputs, const Scope &scope) {
return GetVarValue<T>("Output", outputs, scope);
}
template <typename T>
static T *FilterFrom(const VariableNameMap &inputs, const Scope &scope) {
return GetVarValue<T>("Filter", inputs, scope);
}
template <typename T>
static const T GetAttr(std::string key, const AttributeMap &map) {
return ((Attribute)map.at(key)).Get<T>();
}
template <typename T>
static T *GetVarValue(std::string key, const VariableNameMap &var_map,
const Scope &scope) {
auto var_vec = var_map.at(key);
if (var_vec.size()) {
// std::cout << " get var value -- " << var_vec[0] << std::endl;
auto var = scope.FindVar(var_vec[0]);
return var->GetMutable<T>();
} else {
return nullptr;
}
}
};
class ConvParam : OpParam {
public:
ConvParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
filter_ = FilterFrom<framework::LoDTensor>(inputs, scope);
input_ = InputFrom<framework::Tensor>(inputs, scope);
output_ = OutputFrom<framework::Tensor>(outputs, scope);
strides_ = GetAttr<std::vector<int>>("strides", attrs);
paddings_ = GetAttr<std::vector<int>>("paddings", attrs);
dilations_ = GetAttr<std::vector<int>>("dilations", attrs);
    groups_ = GetAttr<int>("groups", attrs);
}
const Tensor *Input() const { return input_; }
const LoDTensor *Filter() const { return filter_; }
Tensor *Output() const { return output_; }
const std::vector<int> &Strides() const { return strides_; }
const std::vector<int> &Paddings() const { return paddings_; }
const std::vector<int> &Dilations() const { return dilations_; }
const int &Groups() const { return groups; }
private:
Tensor *input_;
Tensor *output_;
LoDTensor *filter_;
std::vector<int> strides_;
std::vector<int> paddings_;
std::vector<int> dilations_;
  int groups_;
};
std::ostream &operator<<(std::ostream &os, const ConvParam &conv_param);
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdio>
#include <stdexcept>
#include <string>
#include <typeindex>
#include "framework/framework.pb.h"
namespace paddle_mobile {
namespace framework {
inline proto::VarType::Type ToDataType(std::type_index type) {
/*if (typeid(platform::float16).hash_code() == type.hash_code()) {
return proto::VarType::FP16;
} else */
if (typeid(const float).hash_code() == type.hash_code()) {
// CPPLint complains Using C-style cast. Use static_cast<float>() instead
// One fix to this is to replace float with const float because
// typeid(T) == typeid(const T)
// http://en.cppreference.com/w/cpp/language/typeid
return proto::VarType::FP32;
} else if (typeid(const double).hash_code() == type.hash_code()) {
return proto::VarType::FP64;
} else if (typeid(const int).hash_code() == type.hash_code()) {
return proto::VarType::INT32;
} else if (typeid(const int64_t).hash_code() == type.hash_code()) {
return proto::VarType::INT64;
} else if (typeid(const bool).hash_code() == type.hash_code()) {
return proto::VarType::BOOL;
  } else {
    // PADDLE_THROW("Not supported");
    // Falling off the end of a non-void function is undefined behavior, so
    // fail loudly on unsupported types instead.
    throw std::runtime_error("ToDataType: unsupported type");
  }
}
inline std::type_index ToTypeIndex(proto::VarType::Type type) {
switch (type) {
// case proto::VarType::FP16:
// return typeid(platform::float16);
case proto::VarType::FP32:
return typeid(float);
case proto::VarType::FP64:
return typeid(double);
case proto::VarType::INT32:
return typeid(int);
case proto::VarType::INT64:
return typeid(int64_t);
case proto::VarType::BOOL:
return typeid(bool);
    default:
      // PADDLE_THROW("Not support type %d", type);
      printf("Not support type %d", static_cast<int>(type));
      // Avoid falling off the end of a non-void function.
      throw std::runtime_error("ToTypeIndex: unsupported type");
}
}
template <typename Visitor>
inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
switch (type) {
// case proto::VarType::FP16:
// visitor.template operator()<platform::float16>();
// break;
case proto::VarType::FP32:
visitor.template operator()<float>();
break;
case proto::VarType::FP64:
visitor.template operator()<double>();
break;
case proto::VarType::INT32:
visitor.template operator()<int>();
break;
case proto::VarType::INT64:
visitor.template operator()<int64_t>();
break;
case proto::VarType::BOOL:
visitor.template operator()<bool>();
break;
default:
// PADDLE_THROW("Not supported");
printf("Not supported");
}
}
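// Illustrative usage sketch (PrintSizeVisitor is a hypothetical functor;
// any type with a templated operator() works):
//
//   struct PrintSizeVisitor {
//     template <typename T>
//     void operator()() { printf("%zu\n", sizeof(T)); }
//   };
//   VisitDataType(proto::VarType::FP32, PrintSizeVisitor{});  // prints 4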
inline std::string DataTypeToString(const proto::VarType::Type type) {
switch (type) {
case proto::VarType::FP16:
return "float16";
case proto::VarType::FP32:
return "float32";
case proto::VarType::FP64:
return "float64";
case proto::VarType::INT16:
return "int16";
case proto::VarType::INT32:
return "int32";
case proto::VarType::INT64:
return "int64";
case proto::VarType::BOOL:
return "bool";
    default:
      // PADDLE_THROW("Not support type %d", type);
      printf("Not support type %d", static_cast<int>(type));
      // Avoid falling off the end of a non-void function.
      return "unknown";
}
}
inline std::ostream &operator<<(std::ostream &out,
const proto::VarType::Type &type) {
out << DataTypeToString(type);
return out;
}
} // namespace framework
} // namespace paddle_mobile
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __CUDACC__
#define HOSTDEVICE __host__ __device__
#define DEVICE __device__
#define HOST __host__
#else
#define HOSTDEVICE
#define DEVICE
#define HOST
#endif
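// Illustrative usage sketch: a function annotated this way compiles for both
// host and device when built with nvcc, and as a plain host function
// otherwise.
//
//   HOSTDEVICE inline float clip(float x, float lo, float hi) {
//     return x < lo ? lo : (x > hi ? hi : x);
//   }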
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
// Disable the copy and assignment operator for a class.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname&) = delete; \
classname(classname&&) = delete; \
classname& operator=(const classname&) = delete; \
classname& operator=(classname&&) = delete
#endif
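// Illustrative usage sketch: placing the macro inside a class body deletes
// its copy and move operations (note that the macro leaves the class in a
// "private:" section).
//
//   class Executor {
//    public:
//     Executor() = default;
//     DISABLE_COPY_AND_ASSIGN(Executor);
//   };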
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "framework/executor.h"
#include "io.h"
#include "test_helper.h"
//
// template <typename T>
// void SetupTensor(paddle::framework::LoDTensor* input,
// paddle::framework::DDim dims, T lower, T upper) {
// static unsigned int seed = 100;
// std::mt19937 rng(seed++);
// std::uniform_real_distribution<double> uniform_dist(0, 1);
//
// T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
// for (int i = 0; i < input->numel(); ++i) {
// input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) +
// lower);
// }
//}
int main() {
std::string data_set = "cifar10";
//
// if (data_set == "cifar10") {
// SetupTensor<float>(&input, {FLAGS_batch_size, 3, 32, 32},
// static_cast<float>(0), static_cast<float>(1));
// } else if (data_set == "imagenet") {
// SetupTensor<float>(&input, {FLAGS_batch_size, 3, 224, 224},
// static_cast<float>(0), static_cast<float>(1));
// } else {
// LOG(FATAL) << "Only cifar10 or imagenet is supported.";
// }
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string(
"../test/models/image_classification_resnet.inference.model"));
paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(program);
paddle_mobile::framework::Tensor input;
SetupTensor<float>(&input, {1, 3, 32, 32}, static_cast<float>(0),
static_cast<float>(1));
  // float* input_ptr = input.data<float>();
  // for (int i = 0; i < input.numel(); ++i) {
  //   std::cout << input_ptr[i] << std::endl;
  // }
// std::cout << "input: " << input.memory_size() << std::endl;
// std::cout << "input: " << input.numel() << std::endl;
auto output = executor.predict(input);
// std::cout << "output: " << output->memory_size() << std::endl;
// std::cout << "output: " << output->numel() << std::endl;
// float* output_ptr = output->data<float>();
// for (int j = 0; j < output->numel(); ++j) {
// std::cout << " value of output: " << output_ptr[j] << std::endl;
// }
return 0;
}
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include <random>
#include "framework/ddim.h"
#include "framework/tensor.h"
template <typename T>
void SetupTensor(paddle_mobile::framework::Tensor* input,
paddle_mobile::framework::DDim dims, T lower, T upper) {
static unsigned int seed = 100;
std::mt19937 rng(seed++);
std::uniform_real_distribution<double> uniform_dist(0, 1);
T* input_ptr = input->mutable_data<T>(dims);
for (int i = 0; i < input->numel(); ++i) {
input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
}
}
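// Illustrative usage sketch (mirrors the call in the test driver above;
// fills a 1 x 3 x 32 x 32 tensor with uniform values in [0, 1)):
//
//   paddle_mobile::framework::Tensor input;
//   SetupTensor<float>(&input, {1, 3, 32, 32}, 0.0f, 1.0f);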