Commit a2a0d6f8 authored by caoying03

Merge branch 'develop' into softmax_with_cross_entropy_op

......@@ -27,3 +27,4 @@ CMakeFiles
cmake_install.cmake
paddle/.timestamp
python/paddlepaddle.egg-info/
paddle/pybind/pybind.h
......@@ -36,10 +36,6 @@ before_install:
# protobuf version.
- sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- curl https://glide.sh/get | bash
- eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- go get -u github.com/alecthomas/gometalinter
- gometalinter --install
- |
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
script:
......
......@@ -27,7 +27,7 @@ if(NOT CMAKE_CROSSCOMPILING)
endif(NOT CMAKE_CROSSCOMPILING)
find_package(Git REQUIRED)
find_package(Threads REQUIRED)
if(NOT ANDROID)
if(NOT ANDROID AND NOT IOS)
find_package(Boost QUIET)
endif()
......@@ -64,27 +64,29 @@ if(NOT CMAKE_BUILD_TYPE)
FORCE)
endif()
if(ANDROID)
if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
# TODO: support glog for Android api 16 ~ 19 in the future
message(WARNING "Using the unofficial git repository <https://github.com/Xreki/glog.git> instead")
if(ANDROID OR IOS)
if(ANDROID)
if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
message(FATAL_ERROR "Unsupported standalone toolchains with Android API level lower than 16")
elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
# TODO: support glog for Android api 16 ~ 19 in the future
message(WARNING "Using the unofficial git repository <https://github.com/Xreki/glog.git> instead")
endif()
endif()
set(WITH_GPU OFF CACHE STRING
"Disable GPU when cross-compiling for Android" FORCE)
"Disable GPU when cross-compiling for Android and iOS" FORCE)
set(WITH_AVX OFF CACHE STRING
"Disable AVX when cross-compiling for Android" FORCE)
"Disable AVX when cross-compiling for Android and iOS" FORCE)
set(WITH_PYTHON OFF CACHE STRING
"Disable PYTHON when cross-compiling for Android" FORCE)
"Disable PYTHON when cross-compiling for Android and iOS" FORCE)
set(WITH_RDMA OFF CACHE STRING
"Disable RDMA when cross-compiling for Android" FORCE)
"Disable RDMA when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLDNN OFF CACHE STRING
"Disable MKLDNN when cross-compiling for Android" FORCE)
"Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLML OFF CACHE STRING
"Disable MKLML package when cross-compiling for Android" FORCE)
endif(ANDROID)
"Disable MKLML package when cross-compiling for Android and iOS" FORCE)
endif()
set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
"A path setting third party libraries download & build directories.")
......
......@@ -171,3 +171,10 @@ if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY)
add_definitions(-DPADDLE_USE_REFERENCE_CBLAS)
message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
endif()
if(IOS_USE_VECLIB_FOR_BLAS AND VECLIB_FOUND)
set(CBLAS_FOUND ON)
set(CBLAS_PROVIDER vecLib)
set(CBLAS_INC_DIR ${VECLIB_INC_DIR})
add_definitions(-DPADDLE_USE_VECLIB)
endif()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a toolchain file for cross-compiling for iOS, and the
# configuration largely refers to public toolchain file:
# https://raw.githubusercontent.com/leetal/ios-cmake/master/ios.toolchain.cmake
# and
# https://github.com/cristeab/ios-cmake
#
# Supports options:
# IOS_PLATFORM = OS (default) or SIMULATOR
# This decides if SDKS will be selected from the iPhoneOS.platform or iPhoneSimulator.platform folders
# OS - the default, used to build for iPhone and iPad physical devices, which have an arm arch.
# SIMULATOR - used to build for the Simulator platforms, which have an x86 arch.
# IOS_ARCH
# The architectures to support, such as "arm64" or "armv7;arm64"
# IOS_DEPLOYMENT_TARGET
# The minimum iOS deployment version, such as "7.0"
# IOS_ENABLE_BITCODE = ON (default) or OFF
# IOS_USE_VECLIB_FOR_BLAS = OFF (default) or ON
# IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder
# By default this location is automatically chosen based on the IOS_PLATFORM value above.
# If set manually, it will override the default location and force the use of a particular Developer Platform
# IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder
# By default this location is automatically chosen based on the IOS_DEVELOPER_ROOT value.
# In this case it will always be the most up-to-date SDK found in the IOS_DEVELOPER_ROOT path.
# If set manually, this will force the use of a specific SDK version
# Macros:
# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE)
# A convenience macro for setting Xcode-specific properties on targets
# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1")
# find_host_package (PROGRAM ARGS)
# A macro used to find executable programs on the host system, not within the iOS environment.
# Thanks to the android-cmake project for providing the command
if(NOT IOS)
return()
endif()
set(CMAKE_SYSTEM_NAME Darwin)
# Get the Xcode version being used.
execute_process(COMMAND xcodebuild -version
OUTPUT_VARIABLE XCODE_VERSION
RESULT_VARIABLE XCODE_VERSION_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT ${XCODE_VERSION_RESULT})
string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}")
string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}")
message(STATUS "Building with Xcode version: ${XCODE_VERSION}")
else()
message(FATAL_ERROR "Cannot execute xcodebuild, please check whether xcode is installed.")
endif()
# Required as of cmake 2.8.10
set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING "Force unset of the deployment target for iOS" FORCE)
# Setup iOS platform unless specified manually with IOS_PLATFORM
if(NOT DEFINED IOS_PLATFORM)
set(IOS_PLATFORM "OS")
endif()
set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform")
# Set the architecture for iOS
if(NOT DEFINED IOS_ARCH)
if(IOS_PLATFORM STREQUAL "OS")
# FIXME(liuyiqun): support "armv7;armv7s;arm64" future
set(IOS_ARCH "arm64")
elseif(IOS_PLATFORM STREQUAL "SIMULATOR")
set(IOS_ARCH "i386;x86_64")
elseif(IOS_PLATFORM STREQUAL "WATCHOS")
set(IOS_ARCH armv7k)
endif()
endif()
set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS")
# Specify minimum iOS deployment version
if(NOT DEFINED IOS_DEPLOYMENT_TARGET)
set(IOS_DEPLOYMENT_TARGET "7.0")
endif()
set(IOS_DEPLOYMENT_TARGET ${IOS_DEPLOYMENT_TARGET} CACHE STRING "Minimum iOS version")
# Whether to enable bitcode
if(NOT DEFINED IOS_ENABLE_BITCODE)
set(IOS_ENABLE_BITCODE ON)
endif()
set(IOS_ENABLE_BITCODE ${IOS_ENABLE_BITCODE} CACHE BOOL "Whether to enable bitcode")
if(NOT DEFINED IOS_USE_VECLIB_FOR_BLAS)
set(IOS_USE_VECLIB_FOR_BLAS OFF)
endif()
set(IOS_USE_VECLIB_FOR_BLAS ${IOS_USE_VECLIB_FOR_BLAS} CACHE BOOL "Whether to use vecLib")
# Check the platform selection and setup for developer root
if(${IOS_PLATFORM} STREQUAL "OS")
set(IOS_PLATFORM_LOCATION "iPhoneOS.platform")
set(XCODE_IOS_PLATFORM iphoneos)
# This causes the installers to properly locate the output libraries
set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos")
elseif(${IOS_PLATFORM} STREQUAL "SIMULATOR")
set(IOS_PLATFORM_LOCATION "iPhoneSimulator.platform")
set(XCODE_IOS_PLATFORM iphonesimulator)
# This causes the installers to properly locate the output libraries
set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator")
elseif(${IOS_PLATFORM} STREQUAL "WATCHOS")
set(IOS_PLATFORM_LOCATION "WatchOS.platform")
set(XCODE_IOS_PLATFORM watchos)
# This causes the installers to properly locate the output libraries
set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-watchos")
else(${IOS_PLATFORM} STREQUAL "OS")
message(FATAL_ERROR "Unsupported IOS_PLATFORM value selected. Please set to\n"
"\t OS, SIMULATOR, or WATCHOS.")
endif()
# Check iOS developer toolchain
if(NOT DEFINED IOS_DEVELOPER_ROOT)
# Setup iOS developer location
execute_process(COMMAND xcode-select -print-path
OUTPUT_VARIABLE XCODE_DEVELOPER_DIR
RESULT_VARIABLE XCODE_DEVELOPER_DIR_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
# Xcode 4.3 changed the installation location, choose the most recent one available
if(${XCODE_VERSION} VERSION_LESS "4.3.0")
set(IOS_DEVELOPER_ROOT "/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
else()
set(IOS_DEVELOPER_ROOT "${XCODE_DEVELOPER_DIR}/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
endif()
endif()
if(EXISTS ${IOS_DEVELOPER_ROOT})
set(IOS_DEVELOPER_ROOT ${IOS_DEVELOPER_ROOT} CACHE PATH "Location of iOS Platform")
else()
message(FATAL_ERROR "Invalid IOS_DEVELOPER_ROOT: ${IOS_DEVELOPER_ROOT} does not exist.")
endif()
# Check iOS SDK
if(NOT DEFINED IOS_SDK_ROOT)
# Find and use the most recent iOS sdk
file(GLOB IOS_SDK_LISTS "${IOS_DEVELOPER_ROOT}/SDKs/*")
if(IOS_SDK_LISTS)
list(SORT IOS_SDK_LISTS)
list(REVERSE IOS_SDK_LISTS)
list(GET IOS_SDK_LISTS 0 IOS_SDK_ROOT)
else(IOS_SDK_LISTS)
message(FATAL_ERROR "No iOS SDK's found in default search path ${IOS_DEVELOPER_ROOT}."
" Please manually set IOS_SDK_ROOT or install the iOS SDK.")
endif(IOS_SDK_LISTS)
endif()
if(EXISTS ${IOS_SDK_ROOT})
set(IOS_SDK_ROOT ${IOS_SDK_ROOT} CACHE PATH "Location of the selected iOS SDK")
message(STATUS "iOS toolchain: ${IOS_SDK_ROOT}")
else()
message(FATAL_ERROR "Invalid IOS_SDK_ROOT: ${IOS_SDK_ROOT} does not exist.")
endif()
# Set the sysroot default to the most recent SDK
set(CMAKE_OSX_SYSROOT ${IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support")
# Get version of iOS SDK
execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion
OUTPUT_VARIABLE IOS_SDK_VERSION
RESULT_VARIABLE IOS_SDK_VERSION_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(${IOS_SDK_VERSION_RESULT})
string(REGEX MATCH "(([0-9]+)\\.)+([0-9]+)" IOS_SDK_VERSION "${IOS_SDK_ROOT}")
endif()
if(NOT IOS_SDK_VERSION)
message(WARNING "Cannot get SDK's version.")
set(IOS_SDK_VERSION 1)
endif()
set(CMAKE_SYSTEM_VERSION ${IOS_SDK_VERSION})
# Find the C & C++ compilers for the specified SDK.
if(NOT CMAKE_C_COMPILER)
# Default to use clang
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang
OUTPUT_VARIABLE IOS_C_COMPILER
RESULT_VARIABLE IOS_C_COMPILER_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(${IOS_C_COMPILER_RESULT})
get_filename_component(IOS_C_COMPILER clang PROGRAM)
endif()
else(NOT CMAKE_C_COMPILER)
# User can set it in cmake command
get_filename_component(IOS_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM)
endif(NOT CMAKE_C_COMPILER)
if(NOT EXISTS ${IOS_C_COMPILER})
message(FATAL_ERROR "Cannot find C compiler: ${IOS_C_COMPILER}")
endif()
if(NOT CMAKE_CXX_COMPILER)
# Default to use clang++
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++
OUTPUT_VARIABLE IOS_CXX_COMPILER
RESULT_VARIABLE IOS_CXX_COMPILER_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(${IOS_CXX_COMPILER_RESULT})
get_filename_component(IOS_CXX_COMPILER clang++ PROGRAM)
endif()
else(NOT CMAKE_CXX_COMPILER)
# User can set it in cmake command
get_filename_component(IOS_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM)
endif(NOT CMAKE_CXX_COMPILER)
if(NOT EXISTS ${IOS_CXX_COMPILER})
message(FATAL_ERROR "Cannot find CXX compiler: ${IOS_CXX_COMPILER}")
endif()
set(CMAKE_C_COMPILER ${IOS_C_COMPILER} CACHE PATH "C compiler" FORCE)
set(CMAKE_CXX_COMPILER ${IOS_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE)
set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
# Set iOS specific C/C++ flags
if(IOS_PLATFORM STREQUAL "OS")
if(XCODE_VERSION VERSION_LESS "7.0")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-mios-version-min=${IOS_DEPLOYMENT_TARGET}")
else()
# Xcode 7.0+ uses flags we can build directly from XCODE_IOS_PLATFORM.
set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
else()
set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-mios-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
if(IOS_ENABLE_BITCODE)
set(XCODE_IOS_BITCODE_FLAGS "${IOS_COMPILER_FLAGS} -fembed-bitcode")
else()
set(XCODE_IOS_BITCODE_FLAGS "")
endif()
set(IOS_COMPILER_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${XCODE_IOS_BITCODE_FLAGS}")
# Hidden visibility is required for C++ on iOS
set(CMAKE_C_FLAGS "${IOS_COMPILER_FLAGS} ${CMAKE_C_FLAGS}" CACHE STRING "C flags")
set(CMAKE_CXX_FLAGS "${IOS_COMPILER_FLAGS} -fvisibility-inlines-hidden ${CMAKE_CXX_FLAGS}" CACHE STRING "CXX flags")
set(IOS_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first")
if(IOS_USE_VECLIB_FOR_BLAS)
# Find vecLib for iOS
set(VECLIB_SEARCH_DIRS
${IOS_SDK_ROOT}/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks
${IOS_SDK_ROOT}/System/Library/Frameworks/Accelerate.framework/Frameworks
)
find_path(VECLIB_INC_DIR vecLib.h PATHS ${VECLIB_SEARCH_DIRS}/vecLib.framework/Headers)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(vecLib DEFAULT_MSG VECLIB_INC_DIR)
if(VECLIB_FOUND)
if(VECLIB_INC_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*")
set(IOS_LINK_FLAGS ${IOS_LINK_FLAGS} -lcblas "-framework vecLib")
message(STATUS "Found standalone vecLib.framework")
else()
set(IOS_LINK_FLAGS ${IOS_LINK_FLAGS} -lcblas "-framework Accelerate")
message(STATUS "Found vecLib as part of Accelerate.framework")
endif()
endif()
endif()
set(CMAKE_C_LINK_FLAGS "${IOS_LINK_FLAGS} ${CMAKE_C_LINK_FLAGS}")
set(CMAKE_CXX_LINK_FLAGS "${IOS_LINK_FLAGS} ${CMAKE_CXX_LINK_FLAGS}")
set(CMAKE_PLATFORM_HAS_INSTALLNAME 1)
if(NOT IOS_ENABLE_BITCODE)
set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names")
set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names")
else()
set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib")
set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle")
endif()
set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a")
# hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old build tree
# (where install_name_tool was hardcoded) and where CMAKE_INSTALL_NAME_TOOL isn't in the cache
# and still cmake didn't fail in CMakeFindBinUtils.cmake (because it isn't rerun)
# hardcode CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did before, Alex
if(NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
endif()
# Set the find root to the iOS developer roots and to user defined paths
set(CMAKE_FIND_ROOT_PATH ${IOS_DEVELOPER_ROOT} ${IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH}
CACHE string "iOS find search path root")
# default to searching for frameworks first
set(CMAKE_FIND_FRAMEWORK FIRST)
# set up the default search directories for frameworks
set(CMAKE_SYSTEM_FRAMEWORK_PATH
${IOS_SDK_ROOT}/System/Library/Frameworks
${IOS_SDK_ROOT}/System/Library/PrivateFrameworks
${IOS_SDK_ROOT}/Developer/Library/Frameworks
)
# only search the iOS sdks, not the remainder of the host filesystem
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
message(STATUS "iOS: Targeting iOS '${CMAKE_SYSTEM_VERSION}', "
"building for '${IOS_PLATFORM}' platform, with architecture '${CMAKE_OSX_ARCHITECTURES}'")
message(STATUS "System CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
message(STATUS "System CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
# Used in ExternalProject command
string(REPLACE ";" "\\$<SEMICOLON>" EXTERNAL_IOS_ARCHITECTURES "${CMAKE_OSX_ARCHITECTURES}")
set(EXTERNAL_OPTIONAL_ARGS
-DCMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}
-DCMAKE_OSX_ARCHITECTURES=${EXTERNAL_IOS_ARCHITECTURES})
# This little macro lets you set any Xcode-specific property
macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE)
set_property (TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} ${XCODE_VALUE})
endmacro(set_xcode_property)
# This macro lets you find executable programs on the host system
macro(find_host_package)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
set(IOS FALSE)
find_package(${ARGN})
set(IOS TRUE)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
endmacro(find_host_package)
......@@ -39,13 +39,14 @@ ExternalProject_Add(
PREFIX ${GFLAGS_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DBUILD_TESTING=OFF
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_TESTING=OFF
-DCMAKE_BUILD_TYPE=Release
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
......
......@@ -34,16 +34,17 @@ ExternalProject_Add(
PREFIX ${GLOG_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DWITH_GFLAGS=ON
CMAKE_ARGS -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
CMAKE_ARGS -DBUILD_TESTING=OFF
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DWITH_GFLAGS=ON
-Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
-DBUILD_TESTING=OFF
-DCMAKE_BUILD_TYPE=Release
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR:PATH=${GLOG_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
......
......@@ -48,15 +48,16 @@ IF(WITH_TESTING)
PREFIX ${GTEST_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DBUILD_GMOCK=ON
CMAKE_ARGS -Dgtest_disable_pthreads=ON
CMAKE_ARGS -Dgtest_force_shared_crt=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DBUILD_GMOCK=ON
-Dgtest_disable_pthreads=ON
-Dgtest_force_shared_crt=ON
-DCMAKE_BUILD_TYPE=Release
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
......
......@@ -29,30 +29,41 @@ IF(NOT ${CBLAS_FOUND})
"${CBLAS_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
CACHE FILEPATH "openblas library." FORCE)
IF(APPLE)
SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -isysroot ${CMAKE_OSX_SYSROOT}")
SET(COMMON_ARGS CC=${OPENBLAS_CC} NO_SHARED=1 NO_LAPACK=1 libs)
ELSE()
SET(COMMON_ARGS CC=${CMAKE_C_COMPILER} NO_SHARED=1 NO_LAPACK=1 libs)
ENDIF()
SET(OPENBLAS_CC "${CMAKE_C_COMPILER}")
IF(CMAKE_CROSSCOMPILING)
SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER})
GET_FILENAME_COMPONENT(CROSS_SUFFIX ${CMAKE_C_COMPILER} DIRECTORY)
SET(CROSS_SUFFIX ${CROSS_SUFFIX}/)
IF(ANDROID)
# arm_soft_fp_abi branch of OpenBLAS to support softfp
# https://github.com/xianyi/OpenBLAS/tree/arm_soft_fp_abi
SET(OPENBLAS_COMMIT "b5c96fcfcdc82945502a2303116a64d89985daf5")
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
SET(TARGET "ARMV7")
SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 ARM_SOFTFP_ABI=1 USE_THREAD=0)
ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
SET(TARGET "ARMV8")
SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV8 BINARY=64 USE_THREAD=0)
ENDIF()
ELSEIF(IOS)
# FIXME(liuyiqun): support multiple architectures
SET(OPENBLAS_COMMIT "b5c96fcfcdc82945502a2303116a64d89985daf5")
SET(OPENBLAS_CC "${OPENBLAS_CC} ${CMAKE_C_FLAGS} -isysroot ${CMAKE_OSX_SYSROOT}")
IF(CMAKE_OSX_ARCHITECTURES MATCHES "armv7")
SET(OPENBLAS_CC "${OPENBLAS_CC} -arch armv7")
SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 ARM_SOFTFP_ABI=1 USE_THREAD=0)
ELSEIF(CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
SET(OPENBLAS_CC "${OPENBLAS_CC} -arch arm64")
SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV8 BINARY=64 USE_THREAD=0 CROSS_SUFFIX=${CROSS_SUFFIX})
ENDIF()
SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER} TARGET=${TARGET} ARM_SOFTFP_ABI=1 USE_THREAD=0)
ELSEIF(RPI)
# use hardfp
SET(OPENBLAS_COMMIT "v0.2.20")
SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER} TARGET=ARMV7 USE_THREAD=0)
SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 USE_THREAD=0)
ENDIF()
ELSE()
IF(APPLE)
SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -isysroot ${CMAKE_OSX_SYSROOT}")
ENDIF()
SET(OPENBLAS_COMMIT "v0.2.20")
SET(OPTIONAL_ARGS "")
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^x86(_64)?$")
......@@ -60,6 +71,8 @@ IF(NOT ${CBLAS_FOUND})
ENDIF()
ENDIF()
SET(COMMON_ARGS CC=${OPENBLAS_CC} NO_SHARED=1 NO_LAPACK=1 libs)
ExternalProject_Add(
extern_openblas
${EXTERNAL_PROJECT_LOG_ARGS}
......
......@@ -173,7 +173,8 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST)
"-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
"-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
"-Dprotobuf_WITH_ZLIB=ON"
"-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}")
"-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}"
${EXTERNAL_OPTIONAL_ARGS})
SET(OPTIONAL_CACHE_ARGS "-DZLIB_ROOT:STRING=${ZLIB_ROOT}")
ENDIF()
......
......@@ -12,16 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
INCLUDE(ExternalProject)
IF(NOT WITH_PYTHON)
return()
ENDIF()
INCLUDE(python_module)
FIND_PACKAGE(PythonInterp 2.7)
IF(WITH_PYTHON)
FIND_PACKAGE(PythonLibs 2.7)
# Fixme: Maybe find a static library. Get SHARED/STATIC by FIND_PACKAGE.
ADD_LIBRARY(python SHARED IMPORTED GLOBAL)
SET_PROPERTY(TARGET python PROPERTY IMPORTED_LOCATION ${PYTHON_LIBRARIES})
ENDIF(WITH_PYTHON)
FIND_PACKAGE(PythonLibs 2.7)
# Fixme: Maybe find a static library. Get SHARED/STATIC by FIND_PACKAGE.
ADD_LIBRARY(python SHARED IMPORTED GLOBAL)
SET_PROPERTY(TARGET python PROPERTY IMPORTED_LOCATION ${PYTHON_LIBRARIES})
SET(py_env "")
IF(PYTHONINTERP_FOUND)
......@@ -36,9 +37,5 @@ IF(PYTHONINTERP_FOUND)
ENDIF()
ENDIF(PYTHONINTERP_FOUND)
IF(WITH_PYTHON)
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
ELSE()
SET(PYTHON_LIBRARIES "")
ENDIF()
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
......@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
IF(NOT WITH_SWIG_PY)
return()
ENDIF()
FIND_PACKAGE(SWIG)
IF(NOT SWIG_FOUND)
......
......@@ -16,25 +16,14 @@ INCLUDE(ExternalProject)
SET(WARPCTC_SOURCES_DIR ${THIRD_PARTY_PATH}/warpctc)
SET(WARPCTC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/warpctc)
SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE)
INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})
SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Directory" FORCE)
IF(WIN32)
SET(WARPCTC_LIBRARIES
"${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE)
ELSE(WIN32)
IF(APPLE)
SET(_warpctc_SHARED_SUFFIX dylib)
ELSE(APPLE)
SET(_warpctc_SHARED_SUFFIX so)
ENDIF(APPLE)
SET(WARPCTC_LIBRARIES
"${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE)
ENDIF(WIN32)
SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include"
CACHE PATH "Warp-ctc Directory" FORCE)
# Used in unit test test_WarpCTCLayer
SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib"
CACHE PATH "Warp-ctc Library Directory" FORCE)
SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/libwarpctc${CMAKE_SHARED_LIBRARY_SUFFIX}"
CACHE FILEPATH "Warp-ctc Library" FORCE)
IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" )
SET(USE_OMP OFF)
......@@ -49,22 +38,26 @@ ExternalProject_Add(
PREFIX ${WARPCTC_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
CMAKE_ARGS -DWITH_GPU=${WITH_GPU}
CMAKE_ARGS -DWITH_OMP=${USE_OMP}
CMAKE_ARGS -DWITH_TORCH=OFF
CMAKE_ARGS -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
CMAKE_ARGS -DBUILD_SHARED=ON
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
-DWITH_GPU=${WITH_GPU}
-DWITH_OMP=${USE_OMP}
-DWITH_TORCH=OFF
-DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
-DBUILD_SHARED=ON
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=Release
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_INSTALL_PREFIX:PATH=${WARPCTC_INSTALL_DIR}
)
MESSAGE(STATUS "warp-ctc library: ${WARPCTC_LIBRARIES}")
INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})
ADD_LIBRARY(warpctc STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET warpctc PROPERTY IMPORTED_LOCATION ${WARPCTC_LIBRARIES})
ADD_DEPENDENCIES(warpctc extern_warpctc)
......
......@@ -34,15 +34,16 @@ ExternalProject_Add(
GIT_TAG "v1.2.8"
PREFIX ${ZLIB_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
-DBUILD_SHARED_LIBS=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_MACOSX_RPATH=ON
-DCMAKE_BUILD_TYPE=Release
${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
......
......@@ -128,8 +128,10 @@ set(GPU_COMMON_FLAGS
)
if (APPLE)
# On Mac OS X build fat binaries with x86_64 architectures by default.
set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE)
if(NOT CMAKE_CROSSCOMPILING)
# On Mac OS X build fat binaries with x86_64 architectures by default.
set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE)
endif()
else()
set(GPU_COMMON_FLAGS
-Wall
......
......@@ -24,11 +24,10 @@ IF(WIN32)
SET(HOST_SYSTEM "win32")
ELSE(WIN32)
IF(APPLE)
EXEC_PROGRAM (sw_vers ARGS -productVersion OUTPUT_VARIABLE MACOSX_VERSION)
STRING(REGEX MATCH "[0-9]+.[0-9]+" VERSION "${MACOSX_VERSION}")
SET(MACOS_VERSION ${VERSION})
SET(HOST_SYSTEM "macosx")
IF(NOT DEFINED ENV{MACOSX_DEPLOYMENT_TARGET})
EXEC_PROGRAM(sw_vers ARGS -productVersion OUTPUT_VARIABLE HOST_SYSTEM_VERSION)
STRING(REGEX MATCH "[0-9]+.[0-9]+" MACOS_VERSION "${HOST_SYSTEM_VERSION}")
IF(NOT DEFINED ENV{MACOSX_DEPLOYMENT_TARGET})
# Set cache variable - end user may change this during ccmake or cmake-gui configure.
SET(CMAKE_OSX_DEPLOYMENT_TARGET ${MACOS_VERSION} CACHE STRING
"Minimum OS X version to target for deployment (at runtime); newer APIs weak linked. Set to empty string for default value.")
......@@ -49,6 +48,8 @@ ELSE(WIN32)
ELSEIF(LINUX_ISSUE MATCHES "Fedora")
SET(HOST_SYSTEM "fedora")
ENDIF()
STRING(REGEX MATCH "(([0-9]+)\\.)+([0-9]+)" HOST_SYSTEM_VERSION "${LINUX_ISSUE}")
ENDIF(EXISTS "/etc/issue")
IF(EXISTS "/etc/redhat-release")
......@@ -70,7 +71,7 @@ CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES)
MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES)
MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}")
MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}, version: ${HOST_SYSTEM_VERSION}")
MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores")
# configuration for cross-compiling
......@@ -82,6 +83,9 @@ IF(DEFINED CMAKE_SYSTEM_NAME)
ELSEIF(${CMAKE_SYSTEM_NAME} STREQUAL "RPi")
SET(RPI TRUE)
INCLUDE(cross_compiling/raspberry_pi)
ELSEIF(${CMAKE_SYSTEM_NAME} STREQUAL "iOS")
SET(IOS TRUE)
INCLUDE(cross_compiling/ios)
ENDIF()
ENDIF()
......
......@@ -25,7 +25,9 @@ function(target_circle_link_libraries TARGET_NAME)
endif()
endforeach()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
list(APPEND LIBS "-undefined dynamic_lookup")
if(IOS AND NOT IOS_ENABLE_BITCODE)
list(APPEND LIBS "-undefined dynamic_lookup")
endif()
endif()
list(REVERSE libsInArgn)
target_link_libraries(${TARGET_NAME}
......
# Design Doc: Block and Scope
## The Representation of Computation
Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation:
- Caffe, Torch, and Paddle: sequences of layers.
- TensorFlow, Caffe2, MXNet: graphs of operators.
- PaddlePaddle: nested blocks, like C++ and Java programs.
## Block in Programming Languages and Deep Learning
In programming languages, a block is a pair of curly braces that encloses local variable definitions and a sequence of instructions, or operators.
Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning:
| programming languages | PaddlePaddle |
|-----------------------|-----------------------|
| for, while loop | RNN, WhileOp |
| if, if-else, switch | IfElseOp, SwitchOp |
| sequential execution | a sequence of layers |
A key difference is that a C++ program describes a one-pass computation, whereas a deep learning program describes both the forward and backward passes.
## Stack Frames and the Scope Hierarchy
The existence of the backward pass makes the execution of a block in PaddlePaddle different from that in traditional programs:
| programming languages | PaddlePaddle |
|-----------------------|-------------------------------|
| stack | scope hierarchy |
| stack frame | scope |
| push at entering block| push at entering block |
| pop at leaving block  | destroy when minibatch completes |
1. In traditional programs:
   - When the execution enters the left curly brace of a block, the runtime pushes a frame into the stack, where it realizes local variables.
   - After the execution leaves the right curly brace, the runtime pops the frame.
   - The maximum number of frames in the stack is the maximum depth of nested blocks.
1. In PaddlePaddle:
   - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables.
   - PaddlePaddle doesn't pop a scope after the execution of the block, because variables therein are to be used by the backward pass. So it has a stack forest known as a *scope hierarchy*.
   - The height of the highest tree is the maximum depth of nested blocks.
   - After the processing of a minibatch, PaddlePaddle destroys the scope hierarchy.
## Use Blocks in C++ and PaddlePaddle Programs
Let us consolidate the discussion by presenting some examples.
### Blocks with `if-else` and `IfElseOp`
The following C++ program shows how blocks are used with the `if-else` structure:
```c++
int x = 10;
int y = 20;
int out;
bool cond = false;
if (cond) {
int z = x + y;
out = softmax(z);
} else {
int z = fc(x);
out = z;
}
```
An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows:
```python
import paddle as pd
x = var(10)
y = var(20)
cond = var(False)

ie = pd.create_ifelseop(inputs=[x], output_num=1)
with ie.true_block():
    x = ie.inputs(True, 0)
    z = operator.add(x, y)
    ie.set_output(True, 0, operator.softmax(z))
with ie.false_block():
    x = ie.inputs(False, 0)
    z = layer.fc(x)
    ie.set_output(False, 0, z)
out = ie(cond)
```
In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`.
A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.inputs(True, 0)` invocation returns the instances of the 0-th input, `x`, that correspond to true values in `cond`, bound to the local variable `x`, whereas `ie.inputs(False, 0)` returns the instances corresponding to false values.
### Blocks with `for` and `RNNOp`
The following RNN model from the [RNN design doc](./rnn.md)
```python
x = sequence([10, 20, 30])
m = var(0)
W = tensor()
U = tensor()

rnn = create_rnn(inputs=[x])
with rnn.stepnet() as net:
    x = net.set_inputs(0)
    h = net.add_memory(init=m)
    fc_out = pd.matmul(W, x)
    hidden_out = pd.matmul(U, h.pre(n=1))
    sum = pd.add_two(fc_out, hidden_out)
    act = pd.sigmoid(sum)
    h.update(act)  # update memory with act
    net.set_outputs(0, act, hidden_out)  # two outputs

o1, o2 = rnn()
print o1, o2
```
has its equivalent C++ program as follows
```c++
int x[] = {10, 20, 30};
int m = 0;
int W = some_value();
int U = some_other_value();

int mem[sizeof(x) / sizeof(x[0]) + 1];
int o1[sizeof(x) / sizeof(x[0]) + 1];
int o2[sizeof(x) / sizeof(x[0]) + 1];
for (int i = 1; i <= sizeof(x) / sizeof(x[0]); ++i) {
  int xi = x[i - 1];
  if (i == 1) mem[0] = m;
  int fc_out = W * xi;
  int hidden_out = U * mem[i - 1];
  int sum = fc_out + hidden_out;
  int act = sigmoid(sum);
  mem[i] = act;
  o1[i] = act;
  o2[i] = hidden_out;
}
print_array(o1);
print_array(o2);
```
## Compilation and Execution
Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest executes the message for training or inference.
The generation of this protobuf message is like how a compiler generates a binary executable file. The execution of the message is like how the OS executes the binary file.
## The "Binary Executable File Format"
The definition of the protobuf message is as follows:
```protobuf
message BlockDesc {
  repeated VarDesc vars = 1;
  repeated OpDesc ops = 2;
}
```
The step net in the above RNN example would look like:
```
BlockDesc {
  vars = {
    VarDesc {...} // x
    VarDesc {...} // h
    VarDesc {...} // fc_out
    VarDesc {...} // hidden_out
    VarDesc {...} // sum
    VarDesc {...} // act
  }
  ops = {
    OpDesc {...} // matmul
    OpDesc {...} // add_two
    OpDesc {...} // sigmoid
  }
};
```
Also, the RNN operator in the above example is serialized into a protobuf message of type `OpDesc` and would look like:
```
OpDesc {
  inputs = {0} // the index of x
  outputs = {5, 3} // indices of act and hidden_out
  attrs {
    "memories" : {1} // the index of h
    "step_net" : <above step net>
  }
};
```
This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing the global block.
## The Compilation of Blocks
During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator).
VarDescs in a block should have their own name scope, to avoid local variables affecting the parent block's name scope.
A child block's name scope should inherit the parent's, so that an OpDesc in the child block can reference a VarDesc stored in the parent block. For example:
```python
a = pd.Variable(shape=[20, 20])
b = pd.fc(a, params=["fc.w", "fc.b"])

rnn = pd.create_rnn()
with rnn.stepnet() as net:
    x = net.set_inputs(a)
    # reuse fc's parameter
    fc_without_b = pd.get_variable("fc.w")
    net.set_outputs(fc_without_b)

out = rnn()
```
The method `pd.get_variable` retrieves a Variable by name. A Variable may be stored in a parent block but retrieved in a child block, so blocks need a variable scope that supports inheritance.
In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc.
To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers.
`SymbolTable` can do the following:

- store the definitions (names and attributes) of variables and operators,
- verify whether a variable was declared,
- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers).
```c++
// Information in SymbolTable is enough to trace the dependency graph. So maybe
// the Eval() interface taking a SymbolTable is enough.
class SymbolTable {
 public:
  SymbolTable(SymbolTable* parent) : parent_(parent) {}

  OpDesc* NewOp(const string& name = "");

  // TODO: determine whether the name is generated by Python or C++.
  // Currently we assume that a unique name will be generated by C++ if the
  // name argument is left default.
  VarDesc* NewVar(const string& name = "");

  // Find a VarDesc by name. If recursive is true, also search the parent's
  // SymbolTable recursively.
  // This interface is introduced to support InferShape: find the protobuf
  // messages of variables and operators, and pass pointers into InferShape.
  //
  // NOTE: maybe some C++ classes such as VarDescBuilder and OpDescBuilder
  // should be proposed and embedded into pybind, to enable Python to operate
  // on C++ pointers.
  VarDesc* FindVar(const string& name, bool recursive = true);

  OpDesc* FindOp(const string& name);

  BlockDesc Compile() const;

 private:
  SymbolTable* parent_;

  map<string, OpDesc> ops_;
  map<string, VarDesc> vars_;
};
```
After all the descriptions of variables and operators are added into the SymbolTable,
the block has enough information to run.
The `Block` class takes a `BlockDesc` as input and provides `Run` and `InferShape` functions.
```c++
namespace {

class Block : OperatorBase {
 public:
  Block(const BlockDesc& desc) : desc_(desc) {}

  void InferShape(const framework::Scope& scope) const override {
    if (!symbols_ready_) {
      CreateVariables(scope);
      CreateOperators();
    }
    // should run InferShape first.
    for (auto& op : runtime_table_.ops()) {
      op->InferShape(scope);
    }
  }

  void Run(const framework::Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first.");
    for (auto& op : runtime_table_.ops()) {
      op->Run(scope, dev_ctx);
    }
  }

  void CreateVariables(const framework::Scope& scope);
  void CreateOperators();

  // some other necessary interfaces of NetOp are listed below
  // ...

 private:
  BlockDesc desc_;
  bool symbols_ready_{false};
};

}  // namespace
```
## The Execution of Blocks
Block inherits from OperatorBase, which has a Run method.
Block's Run method will run its operators sequentially.
There is another important interface called `Eval`, which take some arguments called targets, and generate a minimal graph which takes targets as the end points and creates a new Block,
after `Run`, `Eval` will get the latest value and return the targets.
The definition of Eval is as follows:
```c++
// Clean a block description by targets using the corresponding dependency graph.
// Return a new BlockDesc with a minimal number of operators.
// NOTE: we return the block's description rather than a Block, so that it can
// be distributed to a cluster.
BlockDesc Prune(const BlockDesc& desc, vector<string> targets);

void Block::Eval(const vector<string>& targets,
                 const framework::Scope& scope,
                 const platform::DeviceContext& dev_ctx) {
  BlockDesc min_desc = Prune(desc_, targets);
  Block min_block(min_desc);
  min_block.Run(scope, dev_ctx);
}
```
# Design Doc: Refactorization Overview
The goals of this refactorization include:

1. Make it easy for external contributors to write new elementary computation operations.
1. Make the codebase clean and readable.
1. Introduce a new design of computation representation -- a computation graph of operators and variables.
1. The graph representation helps implement auto-scalable and auto-fault-recoverable distributed computing.
## Computation Graphs
1. PaddlePaddle represents the computation, training, and inference of DL models by computation graphs.
1. Please dig into [computation graphs](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/graph.md) for a solid example.
1. Users write Python programs to describe the graphs and run them (locally or remotely).
1. A graph is composed of *variables* and *operators*.
1. The description of graphs must be able to be serialized/deserialized, so that it
   1. can be sent to the cloud for distributed execution, and
   1. can be sent to clients for mobile or enterprise deployment.
1. The Python program does two things:
   1. *compilation*: runs a Python program to generate a protobuf message representation of the graph and sends it to
      1. the C++ library `libpaddle.so` for local execution,
      1. the master process of a distributed training job for training, or
      1. the server process of a Kubernetes serving job for distributed serving.
   1. *execution*: according to the protobuf message, constructs instances of class `Variable` and `OperatorBase`, and runs them.
## Description and Realization
At compile time, the Python program generates a protobuf message representation of the graph, i.e., the description of the graph.
At runtime, the C++ program realizes the graph and runs it.
| | Representation (protobuf messages) | Realization (C++ class objects) |
|---|---|---|
|Data|[VarDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L107)|[Variable](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24)|
|Operation|[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35)|[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64)|
|Block|BlockDesc|Block|
The word *graph* is interchangeable with *block* in this document. A graph represents computation steps and local variables just as a C++/Java program block, or a pair of { and }, does.
## Compilation and Execution
1. Run an application Python program to describe the graph. In particular,
   1. create VarDesc to represent local/intermediate variables,
   1. create operators and set attributes,
   1. validate attribute values,
   1. infer the type and the shape of variables,
   1. plan for memory-reuse for variables,
   1. generate the backward and optimization parts of the graph,
   1. possibly split the graph for distributed training.
1. The invocation of `train` or `infer` in the application Python program will:
   1. create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block,
      1. realize local variables defined in the BlockDesc message in the new scope,
      1. a scope is similar to the stack frame in programming languages,
   1. create an instance of class `Block`, in which,
      1. realize operators in the BlockDesc message,
   1. run the Block by calling
      1. `Block::Eval(vector<Variable>* targets)` for forward and backward computations, or
      1. `Block::Eval(vector<Operator>* targets)` for optimization.
## Intermediate Representation (IR)
```text
Compile Time -> IR -> Runtime
```
### Benefit
- Optimization
```text
Compile Time -> IR -> Optimized IR -> Runtime
```
- Send automatically partitioned IR to different nodes.
- Automatic data parallel
```text
Compile Time
|-> Single GPU IR
    |-> [trainer-IR-0, trainer-IR-1, pserver-IR]
        |-> Node-0 (runs trainer-IR-0)
        |-> Node-1 (runs trainer-IR-1)
        |-> Node-2 (runs pserver-IR)
```
- Automatic model parallel (planned for future)
---
# Operator/OpWithKernel/OpKernel
![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/49caf1fb70820fb4a6c217634317c9306f361f36/op_op_with_kern_class_diagram.dot)
---
# Operator
![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot)
* `Operator` is the fundamental building block of the user interface.
  * Operator stores input/output variable names and attributes.
  * The `InferShape` interface is used to infer the shapes of the output variables from the shapes of the inputs.
  * Use `Run` to compute the `output variables` from the `input variables`.
---
# OpWithKernel/Kernel
![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/9d7f4eba185cf41c8e2fbfb40ae21890dbddcd39/op_with_kernel.dot)
* `OpWithKernel` inherits `Operator`.
* `OpWithKernel` contains a Kernel map.
* `OpWithKernel::Run` gets the device's kernel and invokes `OpKernel::Compute`.
* `OpKernelKey` is the map key. It contains only the device place for now, but it may include the data type later.
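
The dispatch can be pictured with a minimal sketch; the names below (`OpKernelKey` with a plain string place, `MulCPUKernel`, `OpWithKernel::kernels`) are illustrative stand-ins, not Paddle's real definitions:

```cpp
#include <cassert>
#include <iostream>
#include <map>
#include <string>

struct OpKernelKey {   // only the device place for now
  std::string place;   // e.g. "CPU" or "GPU"
  bool operator<(const OpKernelKey& o) const { return place < o.place; }
};

struct OpKernel {
  virtual void Compute() const = 0;
  virtual ~OpKernel() = default;
};

struct MulCPUKernel : OpKernel {
  void Compute() const override { std::cout << "mul on CPU\n"; }
};

struct OpWithKernel {
  std::map<OpKernelKey, const OpKernel*> kernels;
  void Run(const std::string& place) const {
    auto it = kernels.find(OpKernelKey{place});
    assert(it != kernels.end() && "no kernel registered for this place");
    it->second->Compute();  // device-specific code lives in the kernel
  }
};

int main() {
  MulCPUKernel cpu;
  OpWithKernel mul;
  mul.kernels[OpKernelKey{"CPU"}] = &cpu;
  mul.Run("CPU");  // picks the CPU kernel from the map
}
```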
---
# Why separate Kernel and Operator
* Separate GPU and CPU code.
  * Make Paddle able to run without a GPU.
* Allow one operator (the user interface) to have many implementations.
  * For example, the same mul op can have different FP16 and FP32 kernels, and different MKL and Eigen kernels.
---
# Libraries for Kernel development
* `Eigen::Tensor` contains basic math and element-wise functions.
  * Note that `Eigen::Tensor` has a broadcast implementation.
  * Limit the number of `tensor.device(dev) = ` expressions in your code.
* `thrust::transform` and `std::transform`.
  * `thrust` has the same API as the C++ standard library. Using `transform`, one can quickly implement a customized element-wise kernel.
  * `thrust` also has more complex APIs, like `scan`, `reduce`, and `reduce_by_key`.
* Hand-writing `GPUKernel` and `CPU` code
  * Do not write `.h` files. The CPU kernel should be in `.cc`, and the GPU kernel in `.cu`. (`GCC` cannot compile GPU code.)
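
As a small illustration of the `tensor.device(dev) =` advice above: one fused `Eigen::Tensor` expression evaluates a whole element-wise formula in a single pass. A minimal sketch, assuming Eigen's unsupported `Tensor` module is available:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>

int main() {
  Eigen::Tensor<float, 1> a(4), b(4), c(4);
  a.setConstant(1.0f);
  b.setConstant(2.0f);
  Eigen::DefaultDevice cpu;
  // One device assignment fuses the arithmetic; avoid a chain of
  // `c.device(dev) = ...` statements that each materialize a temporary.
  c.device(cpu) = a + b * b;
  std::cout << c(0) << std::endl;  // prints 5
}
```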
---
# Operator Register
## Why is registration necessary?
We need a method to build mappings between Op type names and Op classes.
## How is the registration done?
Maintain a map whose key is the type name and whose value is the corresponding Op constructor.
---
# The Registry Map
### `OpInfoMap`
`op_type(string)` -> `OpInfo`
`OpInfo`:
- **`creator`**: The Op constructor.
- **`grad_op_type`**: The type of the gradient Op.
- **`proto`**: The Op's Protobuf, including inputs, outputs and required attributes.
- **`checker`**: Used to check attributes.
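
A minimal sketch of these structures, with simplified stand-in types rather than the real `OpInfo`/`OpInfoMap` definitions:

```cpp
#include <functional>
#include <map>
#include <string>

struct OperatorBase { virtual ~OperatorBase() = default; };
struct OpProto {};        // describes inputs, outputs and required attributes
struct OpAttrChecker {};  // validates attribute values

struct OpInfo {
  std::function<OperatorBase*()> creator;  // the Op constructor
  std::string grad_op_type;                // type name of the gradient Op
  OpProto proto;
  OpAttrChecker checker;
};

// op_type(string) -> OpInfo
using OpInfoMap = std::map<std::string, OpInfo>;
```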
---
# Related Concepts
### Op_Maker
Its constructor takes `proto` and `checker`, which are completed during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37))
### Register Macros
```cpp
REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class)
REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
```
### `USE` Macros
Make sure the registration process is executed and linked.
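
A common way to implement this linking trick is sketched below (an illustration, not Paddle's actual macros): registration lives in the Op's translation unit, and the `USE` side references a symbol from that translation unit so the linker cannot drop it.

```cpp
// In the .cc file that registers the Op: define a touch function the
// linker must resolve.
#define REGISTER_OP_SKETCH(op_type) \
  int TouchOpRegistrar_##op_type() { return 0; }

// Wherever the Op is used: the extern reference forces the registering
// object file to be linked into the final binary.
#define USE_OP_SKETCH(op_type)              \
  extern int TouchOpRegistrar_##op_type();  \
  static int use_op_##op_type = TouchOpRegistrar_##op_type()
```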
---
# Register Process
1. Write the Op class, as well as its gradient Op class if it has one.
2. Write Op maker class. In the constructor, describe its inputs, outputs, and attributes.
3. Invoke the macro `REGISTER_OP`. The macro will
   1. call the maker class to complete `proto` and `checker`, and
   2. with the completed `proto` and `checker`, build a new key-value pair in the `OpInfoMap`.
4. Invoke the `USE` macro where the Op is used, to make sure it is linked.
---
# Backward Module (1/2)
### Create Backward Operator
- Mapping from forward Op to backward Op
![backward](https://gist.githubusercontent.com/dzhwinter/a6fbd4623ee76c459f7f94591fd1abf0/raw/61026ab6e518e66bde66a889bc42557a1fccff33/backward.png)
---
# Backward Module (2/2)
### Build Backward Network
- **Input**: graph of forward operators
- **Output**: graph of backward operators
- **Corner cases in construction**
- shared variable => insert `Add` operator
- no gradient => insert `fill_zero_grad` operator
- recursive netOp => call `Backward` recursively
- RNN Op => recursively call `Backward` on stepnet
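
For example, the shared-variable corner case can be resolved by renaming the duplicated gradient outputs and summing them with the inserted `Add` operator. A hedged sketch with illustrative names (the `@GRAD@n` renaming scheme here is only for illustration):

```cpp
#include <string>
#include <vector>

struct OpDesc {
  std::string type;
  std::vector<std::string> inputs, outputs;
};

// Two backward ops both produce grad_var: rename their outputs to
// grad_var + "@0" / "@1", then insert an Add op for the final value.
std::vector<OpDesc> ResolveSharedGradient(const std::string& grad_var) {
  OpDesc op0{"some_grad_op", {}, {grad_var + "@0"}};
  OpDesc op1{"other_grad_op", {}, {grad_var + "@1"}};
  OpDesc add{"add", {grad_var + "@0", grad_var + "@1"}, {grad_var}};
  return {op0, op1, add};
}
```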
---
# Scope, Variable, Tensor
* `Tensor` is an n-dimension array with type.
  * Only dims and data pointers are stored in `Tensor`.
  * All operations on `Tensor` are written in `Operator` or global functions.
  * Variable-length Tensor design: [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md)
* `Variable` represents the inputs and outputs of an operator, not just `Tensor`.
  * `step_scopes` in RNN is a variable and not a tensor.
* `Scope` is where variables are stored.
  * `map<string /* var name */, Variable>`
  * `Scope` has a hierarchical structure. The local scope can get variables from its parent scope.
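
A minimal sketch of such a hierarchical scope (illustrative, not Paddle's actual `Scope` class): lookup searches the local map first and falls back to the parent recursively.

```cpp
#include <map>
#include <memory>
#include <string>

struct Variable {};

class Scope {
 public:
  explicit Scope(const Scope* parent = nullptr) : parent_(parent) {}

  Variable* NewVar(const std::string& name) {
    auto& slot = vars_[name];
    slot.reset(new Variable);
    return slot.get();
  }

  // Local map first, then the parent scopes recursively.
  Variable* FindVar(const std::string& name) const {
    auto it = vars_.find(name);
    if (it != vars_.end()) return it->second.get();
    return parent_ ? parent_->FindVar(name) : nullptr;
  }

 private:
  const Scope* parent_;
  std::map<std::string, std::unique_ptr<Variable>> vars_;
};
```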
---
# Block (in design)
## The difference from the original RNNOp
- As an operator, it is more intuitive than `RNNOp`.
- It offers a new interface `Eval(targets)` to deduce the minimal block to `Run`.
- It fits the compile-time/runtime separation design:
  - during compilation, `SymbolTable` stores `VarDesc`s and `OpDesc`s and serializes them to a `BlockDesc`;
  - when the graph executes, a Block with the `BlockDesc` passed in creates `Op`s and `Var`s and then calls `Run`.
---
# Milestone
- Take Paddle/books as the main line; the requirements of the models motivate the framework refactoring.
- Model migration:
  - framework development gives **priority support** to model migration; for example,
    - the MNIST demo needs a Python interface,
    - the RNN models require the framework to support `LoDTensor`.
  - determine some timelines,
  - heavily relied-upon Ops need to be migrated first,
  - different models can be migrated in parallel.
- Improve the framework at the same time:
  - accept imperfection; concentrate on solving the specific problem at the right price.
---
# Control the migration quality
- Compare the performance of migrated models with the old ones.
- Follow the Google C++ style guide.
- Build an automatic workflow for generating the Python/C++ documentation:
  - the documentation of layers and ops should be written inside the code,
  - take documentation quality into account when reviewing PRs,
  - preview the documentation, and read and improve it from the users' perspective.
......@@ -321,3 +321,55 @@ pip uninstall py_paddle paddle
Then install Paddle's Python environment by running the following in the build directory:
pip install python/dist/paddle*.whl && pip install ../paddle/dist/py_paddle*.whl
16. What format does PaddlePaddle store parameters in, and how to convert them to and from plain text?
-------------------------------------------------------------------------------------------------------

A model parameter file saved by PaddlePaddle consists of a 16-byte header followed by the network parameters. In the header, bytes 1-4 carry the PaddlePaddle version information and should simply be filled with 0; bytes 5-8 give the number of bytes each parameter value occupies, which is 4 when the saved parameters are of type float and 8 when they are double; bytes 9-16 give the total number of saved parameter values.

To restore model parameters saved by PaddlePaddle to plain text, load the network parameters with a :code:`numpy.array` of the corresponding data type, skipping the header of the parameter file. Unless double precision was specified when compiling PaddlePaddle, computation defaults to float precision and the saved parameters are float as well, in which case :code:`dtype=float32` is usually set when using :code:`numpy.array`. For example:

.. code-block:: python

    import numpy as np

    def read_parameter(fname, width):
        s = open(fname).read()
        # skip header
        vec = np.fromstring(s[16:], dtype=np.float32)
        # width is the size of the corresponding layer
        np.savetxt(fname + ".csv", vec.reshape(width, -1),
                   fmt="%.6f", delimiter=",")
To convert plain-text parameters into model parameters that PaddlePaddle can load, first construct the header information and then write the network parameters. The following code converts a randomly generated matrix into model parameters loadable by PaddlePaddle.

.. code-block:: python

    import struct

    import numpy as np

    def gen_rand_param(param_file, width, height, need_trans):
        np.random.seed()
        header = struct.pack("iil", 0, 4, height * width)
        param = np.float32(np.random.rand(height, width))
        with open(param_file, "w") as fparam:
            fparam.write(header + param.tostring())
17. How to load pre-trained parameters
---------------------------------------

* For a layer that loads pre-trained parameters, set its parameter attribute :code:`is_static=True` so that the layer's parameters stay unchanged during training. Taking an embedding layer as an example, the code is as follows:

.. code-block:: python

    emb_para = paddle.attr.Param(name='emb', is_static=True)
    paddle.layer.embedding(size=word_dim, input=x, param_attr=emb_para)
* Load the pre-trained parameters from the model file into a :code:`numpy.array`, and after creating the parameters, load them with :code:`parameters.set()`. The first 16 bytes of a model parameter file saved by PaddlePaddle are the header, so start reading from the 17th byte when loading the parameters into a :code:`numpy.array`. Taking an embedding layer as an example, the code is as follows:

.. code-block:: python

    import numpy as np

    def load_parameter(file_name, h, w):
        with open(file_name, 'rb') as f:
            f.read(16)  # skip header.
            return np.fromfile(f, dtype=np.float32).reshape(h, w)

    parameters = paddle.parameters.create(my_cost)
    parameters.set('emb', load_parameter(emb_param_file, 30000, 256))
......@@ -19,7 +19,7 @@ if(Boost_FOUND)
endif()
if(WITH_C_API)
add_subdirectory(capi)
add_subdirectory(capi)
endif()
if(WITH_SWIG_PY)
......
......@@ -28,42 +28,38 @@ add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER}
add_dependencies(paddle_capi paddle_proto)
# combine all paddle static libraries together, into libpaddle_capi_whole.a
# user should use PaddleCAPI as -lpaddle_capi_whole
set(capi_whole_library libpaddle_capi_whole.a)
add_custom_target(paddle_capi_whole ALL
COMMAND mkdir -p o_files/capi && cd o_files/capi/ && ar -x $<TARGET_FILE:paddle_capi>
COMMAND mkdir -p o_files/utils && cd o_files/utils/ && ar -x $<TARGET_FILE:paddle_utils>
COMMAND mkdir -p o_files/parameter && cd o_files/parameter/ && ar -x $<TARGET_FILE:paddle_parameter>
COMMAND mkdir -p o_files/math && cd o_files/math/ && ar -x $<TARGET_FILE:paddle_math>
COMMAND mkdir -p o_files/cuda && cd o_files/cuda/ && ar -x $<TARGET_FILE:paddle_cuda>
COMMAND mkdir -p o_files/function && cd o_files/function/ && ar -x $<TARGET_FILE:paddle_function>
COMMAND mkdir -p o_files/gserver && cd o_files/gserver/ && ar -x $<TARGET_FILE:paddle_gserver>
COMMAND mkdir -p o_files/proto && cd o_files/proto/ && ar -x $<TARGET_FILE:paddle_proto>
COMMAND mkdir -p o_files/network && cd o_files/network/ && ar -x $<TARGET_FILE:paddle_network>
COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x $<TARGET_FILE:paddle_pserver>
COMMAND ar crs ${capi_whole_library} `find ./o_files -name '*.o'`
COMMAND rm -rf o_files
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS paddle_capi paddle_utils paddle_parameter paddle_math
paddle_cuda paddle_function paddle_gserver
paddle_proto paddle_pserver paddle_network
)
set_target_properties(paddle_capi_whole
PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library})
set(PADDLE_CAPI_INFER_LIBS
paddle_utils
paddle_parameter
paddle_math
paddle_cuda
paddle_function
paddle_gserver
paddle_proto
paddle_pserver
paddle_network)
cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map")
# TODO: merge mkl into paddle_capi_shared
add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
link_paddle_exe(paddle_capi_shared)
# No shared library for iOS
if(NOT IOS)
set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map")
# TODO: merge mkl into paddle_capi_shared
add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
link_paddle_exe(paddle_capi_shared)
endif()
# install library & headers.
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
install(TARGETS paddle_capi_whole paddle_capi_shared
ARCHIVE DESTINATION lib/${ANDROID_ABI}
LIBRARY DESTINATION lib/${ANDROID_ABI})
execute_process(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
OUTPUT_VARIABLE GIT_COMMITS_LIST
......@@ -72,9 +68,6 @@ if(ANDROID)
if(${GIT_COMMITS_LIST_RESULT})
set(GIT_COMMITS_LIST "No commits.")
endif()
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}
DESTINATION lib/${ANDROID_ABI})
install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI})
install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt
\"Compiler:\n\"
\"\\t${CMAKE_C_COMPILER}\\n\"
......@@ -88,8 +81,11 @@ if(ANDROID)
)"
)
else(ANDROID)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib)
install(TARGETS paddle_capi_shared DESTINATION lib)
install(TARGETS paddle_capi_whole
ARCHIVE DESTINATION lib)
if(NOT IOS)
install(TARGETS paddle_capi_shared DESTINATION lib)
endif()
endif(ANDROID)
# this variable is used for the unit tests
......
......@@ -22,10 +22,10 @@ limitations under the License. */
*/
typedef enum {
HL_POOLING_MAX = 0,
// average includes padded values
HL_POOLING_AVERAGE = 1,
// average does not include padded values
HL_POOLING_AVERAGE_EXCLUDE_PADDING = 2,
HL_POOLING_AVERAGE = 1,
// average includes padded values
HL_POOLING_AVERAGE_INCLUDE_PADDING = 2,
HL_POOLING_END
} hl_pooling_mode_t;
......
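The rename above also pins down the semantics: `HL_POOLING_AVERAGE` now excludes padded values from the divisor, while the include-padding behaviour becomes an explicit mode. A small worked sketch of the difference at a border window (hypothetical 1-D sizes, mirroring the `hstart`/`hend` clamping in the kernels below):

```c++
#include <algorithm>
#include <cstdio>

// Hypothetical 1-D border window: input width 4, window 3, stride 2,
// pad 1, at output index pw = 0. Mirrors the wstart/wend clamping below.
int main() {
  int width = 4, sizeX = 3, strideW = 2, padW = 1, pw = 0;
  int wstart = pw * strideW - padW;                        // -1
  int wend_incl = std::min(wstart + sizeX, width + padW);  // 2
  int pool_incl = wend_incl - wstart;                      // 3, counts the pad
  int wend_excl = std::min(wstart + sizeX, width);         // 2
  int pool_excl = wend_excl - std::max(wstart, 0);         // 2, real pixels only
  std::printf("include=%d exclude=%d\n", pool_incl, pool_excl);
  return 0;
}
```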
......@@ -461,7 +461,7 @@ class add<float32x4_t> {
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
return vmulq_f32(a, b);
return vaddq_f32(a, b);
}
};
......
......@@ -211,13 +211,11 @@ __global__ void KeAvgPoolForward(const int nthreads,
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int hend = min(hstart + sizeY, height + padH);
int wend = min(wstart + sizeX, width + padW);
int pool_size = (hend - hstart) * (wend - wstart);
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
int pool_size = (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * height * width;
......@@ -299,12 +297,14 @@ __global__ void KeAvgPoolBackward(const int nthreads,
outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
for (int ph = phstart; ph < phend; ++ph) {
int hstart = ph * strideH - padH;
int hend = min(hstart + sizeY, height);
hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int hend = min(hstart + sizeY, height + padH);
int wend = min(wstart + sizeX, width + padW);
int wend = min(wstart + sizeX, width);
wstart = max(wstart, 0);
int poolsize = (hend - hstart) * (wend - wstart);
gradient += outGrad[ph * pooledW + pw] / poolsize;
}
......@@ -600,16 +600,13 @@ __global__ void KeAvgPool3DForward(const int nthreads,
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + sizeZ, depth + padD);
int hend = min(hstart + sizeY, height + padH);
int wend = min(wstart + sizeX, width + padW);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
int dend = min(dstart + sizeZ, depth);
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
dend = min(dend, depth);
hend = min(hend, height);
wend = min(wend, width);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * depth * height * width;
......@@ -712,15 +709,18 @@ __global__ void KeAvgPool3DBackward(const int nthreads,
outGrad += (frameNum * channels + offsetC) * pooledD * pooledH * pooledW;
for (int pd = pdstart; pd < pdend; ++pd) {
int dstart = pd * strideD - padD;
int dend = min(dstart + sizeZ, depth);
dstart = max(dstart, 0);
for (int ph = phstart; ph < phend; ++ph) {
int hstart = ph * strideH - padH;
int hend = min(hstart + sizeY, height);
hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + sizeZ, depth + padD);
int hend = min(hstart + sizeY, height + padH);
int wend = min(wstart + sizeX, width + padW);
int wend = min(wstart + sizeX, width);
wstart = max(wstart, 0);
int poolsize = (dend - dstart) * (hend - hstart) * (wend - wstart);
gradient += outGrad[(pd * pooledH + ph) * pooledW + pw] / poolsize;
}
......
......@@ -432,11 +432,11 @@ void hl_create_pooling_descriptor(hl_pooling_descriptor* pooling_desc,
cudnn_mode = CUDNN_POOLING_MAX;
break;
case HL_POOLING_AVERAGE:
cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
break;
case HL_POOLING_AVERAGE_EXCLUDE_PADDING:
cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
break;
case HL_POOLING_AVERAGE_INCLUDE_PADDING:
cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
break;
default:
LOG(FATAL) << "parameter mode error";
}
......
......@@ -4,13 +4,13 @@ PaddlePaddle's RNN doesn't require that all instances have the same length. To
## Challenge of Variable-length Inputs
People usually represent a mini-batch by a Tensor. For example, a mini-batch of 32 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 32x32xO-dimensional tensor T and the 10x32x32 Tensor.
People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 32x32xO-dimensional tensor T and the 10x32x32 Tensor.
Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths.
## LoD as a Solution
### Mini-Batch of variable-length sentenses
### Mini-Batch of variable-length sentences
Let's imagine a mini-batch of 3 variable-length sentences, containing 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information:
......@@ -51,17 +51,17 @@ The many 1's on the second level seem duplicated. For this particular case of 2
In summary, as long as the essential elements (words or images) have the same size, we can represent mini-batches by a LoD Tensor:
- The underlying tensor has size LxD1xD2x..., where D1xD2... is the size of the essential elements, and
- the first dimension size L has an additon property -- a LoD index as a nested vector:
- The first dimension size L has an additional property -- a LoD index as a nested vector:
```c++
typedef std::vector<std::vector<size_t> > LoD;
```
- The LoD index can is not necessary when there are only two levels and all elements of the second level have length 1.
- The LoD index is not necessary when there are only two levels and all elements of the second level have length 1.
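A short sketch of such an index for the earlier mini-batch of 3 sentences with 3, 1, and 2 words (assuming the `size_t` element type above):

```c++
#include <vector>

typedef std::vector<std::vector<size_t> > LoD;

// One level of detail over a (3+1+2)xD tensor: sentence lengths 3, 1, 2.
LoD lod = {{3, 1, 2}};
```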
## Slicing of LoD Tensor
Consider that we have a network with three levels of RNN: the top level one handles articles, the second level one handles sentences, and the basic level one handles words. This network requires that mini-batches represented by 4 level LoD Tensor, for example,
Consider that we have a network with three levels of RNN: the top level handles articles, the second level handles sentences, and the basic level handles words. This network requires that mini-batches be represented by a 3-level LoD Tensor, for example,
```
3
......@@ -90,8 +90,9 @@ and the <1,2>-slice of above example is
Let's go on slicing this slice. Its <1,1>-slice is
```
3
|||
1
1
|
```
### The Slicing Algorithm
......@@ -128,7 +129,7 @@ Suppose that we want to retrieve the <1,2>-slice
we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10.
To avoid the traversal of the LoD tree at slcing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. For example, above LoD Tensor can be transformed into
To avoid the traversal of the LoD tree at slicing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. For example, above LoD Tensor can be transformed into
```
0
......
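A sketch of that lengths-to-offsets transformation for one level (a sketch only, assuming the `size_t` representation from above):

```c++
#include <cstddef>
#include <numeric>
#include <vector>

// Convert one LoD level from lengths to starting offsets, e.g.
// lengths {3, 1, 2} become offsets {0, 3, 4, 6}.
std::vector<size_t> toOffsets(const std::vector<size_t>& lengths) {
  std::vector<size_t> offsets(lengths.size() + 1, 0);
  std::partial_sum(lengths.begin(), lengths.end(), offsets.begin() + 1);
  return offsets;
}
```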
......@@ -22,14 +22,14 @@ namespace framework {
template <>
Eigen::DefaultDevice& ExecutionContext::GetEigenDevice<
platform::CPUPlace, Eigen::DefaultDevice>() const {
return *device_context_->get_eigen_device<Eigen::DefaultDevice>();
return *device_context_.get_eigen_device<Eigen::DefaultDevice>();
}
#ifndef PADDLE_ONLY_CPU
template <>
Eigen::GpuDevice&
ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
return *device_context_->get_eigen_device<Eigen::GpuDevice>();
return *device_context_.get_eigen_device<Eigen::GpuDevice>();
}
#endif
......
......@@ -366,7 +366,7 @@ struct EigenDeviceConverter<platform::GPUPlace> {
class ExecutionContext : public InferShapeContext {
public:
ExecutionContext(const OperatorBase& op, const Scope& scope,
const platform::DeviceContext* device_context)
const platform::DeviceContext& device_context)
: InferShapeContext(op, scope), device_context_(device_context) {}
template <typename PlaceType,
......@@ -374,9 +374,9 @@ class ExecutionContext : public InferShapeContext {
typename EigenDeviceConverter<PlaceType>::EigenDeviceType>
DeviceType& GetEigenDevice() const;
platform::Place GetPlace() const { return device_context_->GetPlace(); }
platform::Place GetPlace() const { return device_context_.GetPlace(); }
const platform::DeviceContext* device_context() const {
const platform::DeviceContext& device_context() const {
return device_context_;
}
......@@ -401,7 +401,8 @@ class ExecutionContext : public InferShapeContext {
return res;
}
const platform::DeviceContext* device_context_;
private:
const platform::DeviceContext& device_context_;
};
template <>
......@@ -461,7 +462,7 @@ class OperatorWithKernel : public OperatorBase {
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const final {
auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
opKernel->Compute(ExecutionContext(*this, scope, &dev_ctx));
opKernel->Compute(ExecutionContext(*this, scope, dev_ctx));
}
static std::unordered_map<std::string /* op_type */, OpKernelMap>&
......
......@@ -52,7 +52,7 @@ public:
int outputHeight = output[2];
int outputWidth = output[3];
int filterMultiplier = outputChannels / groups_;
CHECK_EQ(inputChannels, groups_);
CHECK_EQ(static_cast<size_t>(inputChannels), groups_);
// only support strideH() == strideW() and filterHeight == filterWidth.
CHECK_EQ(strideH(), strideW());
......
......@@ -22,9 +22,12 @@ limitations under the License. */
#include <type_traits>
#include "paddle/parameter/Argument.h"
#include "paddle/utils/ClassRegistrar.h"
#include "paddle/utils/Logging.h"
#ifdef PADDLE_USE_MKLDNN
#include "MKLDNNActivation.h"
#endif
namespace paddle {
static ClassRegistrar<ActivationFunction> gActivationRegistrar;
......@@ -456,6 +459,12 @@ Error __must_check backward(Argument& act) {
END_DEFINE_ACTIVATION(log)
ActivationFunction* ActivationFunction::create(const std::string& type) {
#ifdef PADDLE_USE_MKLDNN
if (!type.empty() && type.compare(0, 7, "mkldnn_") == 0) {
return MKLDNNActivation::create(type);
}
#endif
return gActivationRegistrar.createByType(type);
}
......
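In short, the dispatch above routes any `mkldnn_`-prefixed type string to the MKLDNN registrar and everything else to the default registrar; a hypothetical usage sketch, assuming both registrars are populated as in this patch:

```c++
// "mkldnn_tanh" matches type.compare(0, 7, "mkldnn_") == 0 and is built
// by MKLDNNActivation::create; "tanh" falls through to the default
// gActivationRegistrar.
ActivationFunction* cpuAct = ActivationFunction::create("tanh");
ActivationFunction* dnnAct = ActivationFunction::create("mkldnn_tanh");
```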
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MKLDNNActivation.h"
#include "mkldnn.hpp"
#include "paddle/utils/ClassRegistrar.h"
namespace paddle {
static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar;
/**
* @def MKLDNN_ACTIVATION_CLASS_NAME
* @note MKLDNN_ACTIVATION_CLASS_NAME(relu) relu_;
* means mkldnn_reluActivation relu_;
*/
#define MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) mkldnn_##ACT_TYPE##Activation
/**
* @def DEFINE_MKLDNN_ELTWISE_ACTIVATION
*/
#define DEFINE_MKLDNN_ELTWISE_ACTIVATION(ACT_TYPE, ALPHA, BWD_ALPHA) \
class MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) \
: public MKLDNNEltwiseActivation { \
private: \
static const std::string name; \
static const float alpha; \
static const float bwdAlpha; \
\
public: \
const std::string& getName() const { return name; } \
float getAlpha() const { return alpha; } \
float getBwdAlpha() const { return bwdAlpha; } \
}; \
const std::string MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::name = \
"mkldnn_" #ACT_TYPE; \
const float MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::alpha = ALPHA; \
const float MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::bwdAlpha = BWD_ALPHA; \
static InitFunction __reg_activation__mkldnn_##ACT_TYPE([] { \
gMKLDNNActivationRegistrar \
.registerClass<MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)>( \
"mkldnn_" #ACT_TYPE); \
});
/**
* @brief MKLDNN Relu Activation.
* Actually mkldnn_relu is Leaky Relu.
* f(x) = x (x >= 0)
* f(x) = negative_slope * x (x < 0)
* @note the negative_slope should be -0.f in forward
*/
DEFINE_MKLDNN_ELTWISE_ACTIVATION(relu, -0.f, 0.f)
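// For readability, roughly what the instantiation above expands to:
//
//   class mkldnn_reluActivation : public MKLDNNEltwiseActivation {
//    public:
//     const std::string& getName() const { return name; }  // "mkldnn_relu"
//     float getAlpha() const { return alpha; }              // -0.f
//     float getBwdAlpha() const { return bwdAlpha; }        // 0.f
//   };
//
// plus the static member definitions and an InitFunction that registers
// the class under the type string "mkldnn_relu".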
/**
* @brief MKLDNN Tanh Activation.
*/
DEFINE_MKLDNN_ELTWISE_ACTIVATION(tanh, 0.f, 0.f)
/**
* @brief MKLDNN ELU(Exponential Linear Unit) Activation.
* f(x) = x (x >= 0)
* f(x) = negative_slope * (exp(x) - 1) (x < 0)
*/
DEFINE_MKLDNN_ELTWISE_ACTIVATION(elu, 0.f, 0.f)
ActivationFunction* MKLDNNActivation::create(const std::string& type) {
return gMKLDNNActivationRegistrar.createByType(type);
}
std::vector<std::string> MKLDNNActivation::getAllRegisteredTypes() {
std::vector<std::string> types;
gMKLDNNActivationRegistrar.forEachType(
[&](const std::string& type) { types.push_back(type); });
return types;
}
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ActivationFunction.h"
#include "mkldnn.hpp"
#include "paddle/gserver/layers/MKLDNNBase.h"
#include "paddle/math/MKLDNNMatrix.h"
#include "paddle/parameter/Argument.h"
namespace paddle {
/**
* @brief Base class of MKLDNN Activation.
* Common activation functions are provided,
* including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax
*/
class MKLDNNActivation : public ActivationFunction {
protected:
// input value element count
size_t cnt_;
// should not merge the resetBwd into resetFwd,
// because the grad data would be changing before backward.
bool needResetBwd_;
// mkldnn matrix, primitive, stream and pipeline
MKLDNNMatrixPtr val_;
MKLDNNMatrixPtr grad_;
std::shared_ptr<MKLDNNStream> stream_;
std::shared_ptr<mkldnn::primitive> fwd_;
std::shared_ptr<mkldnn::primitive> bwd_;
std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_;
public:
MKLDNNActivation() : cnt_(0), needResetBwd_(true) {}
~MKLDNNActivation() {}
static ActivationFunction* create(const std::string& type);
static std::vector<std::string> getAllRegisteredTypes();
virtual const std::string& getName() const = 0;
virtual Error __must_check forward(Argument& act) = 0;
virtual Error __must_check backward(Argument& act) = 0;
};
/**
* @brief Base class of MKLDNN Eltwise Activation,
* includes mkldnn_relu, mkldnn_elu and mkldnn_tanh.
*/
class MKLDNNEltwiseActivation : public MKLDNNActivation {
typedef mkldnn::eltwise_forward eltwise_fwd;
typedef mkldnn::eltwise_backward eltwise_bwd;
protected:
// save the forward primitive desc, which can be used backward
std::shared_ptr<eltwise_fwd::primitive_desc> fwdPD_;
// eltwise_bwd needs the src input value
MKLDNNMatrixPtr inVal_;
// used for copying the input data
std::shared_ptr<mkldnn::reorder> copyInVal_;
public:
MKLDNNEltwiseActivation() {}
~MKLDNNEltwiseActivation() {}
virtual const std::string& getName() const = 0;
// In general, the alpha of forward and backward should be equal.
// But for relu, to avoid negative values, they should be opposite.
virtual float getAlpha() const = 0;
virtual float getBwdAlpha() const = 0;
virtual float getBeta() const { return 0.f; }
virtual mkldnn::algorithm getAlgo(const std::string& type) const {
if (type == "mkldnn_relu") {
return mkldnn::algorithm::eltwise_relu;
} else if (type == "mkldnn_tanh") {
return mkldnn::algorithm::eltwise_tanh;
} else if (type == "mkldnn_elu") {
return mkldnn::algorithm::eltwise_elu;
} else {
LOG(FATAL) << "Unknown eltwise activation type: " << type;
}
return (mkldnn::algorithm)0;
}
/**
* reshape and reset the forward primitives
*/
void resetFwd(Argument& act) {
if (cnt_ == act.value->getElementCnt()) {
return;
}
cnt_ = act.value->getElementCnt();
stream_.reset(new MKLDNNStream());
auto eng = CPUEngine::Instance().getEngine();
// get algo setting
mkldnn::algorithm algo = getAlgo(this->getName());
// note: alpha represents the NegativeSlope when used in relu.
float alpha = getAlpha();
float beta = getBeta();
/// forward
pipelineFwd_.clear();
val_ = std::dynamic_pointer_cast<MKLDNNMatrix>(act.value);
if (val_ == nullptr) {
int bs = act.getBatchSize();
int ih = act.getFrameHeight() > 0 ? act.getFrameHeight() : 1;
int iw = act.getFrameWidth() > 0 ? act.getFrameWidth() : 1;
int ic = cnt_ / bs / ih / iw;
CHECK_EQ(cnt_, (size_t)bs * ic * ih * iw);
val_ = MKLDNNMatrix::create(
act.value, {bs, ic, ih, iw}, mkldnn::memory::format::nchw, eng);
CHECK(val_);
}
auto fwdDesc = eltwise_fwd::desc(mkldnn::prop_kind::forward_training,
algo,
val_->getMemoryDesc(),
alpha,
beta);
fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng));
// use inplace for forward but save input value before submit
inVal_ = val_;
if (act.grad) {
// only copy when backward is needed
inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc());
copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_);
CHECK(copyInVal_) << "should not be empty";
pipelineFwd_.push_back(*copyInVal_);
}
fwd_.reset(new eltwise_fwd(*fwdPD_, *val_, *val_));
pipelineFwd_.push_back(*fwd_);
needResetBwd_ = true;
}
/**
* reset the backward primitives; can not be merged into resetFwd as the grad
* data would be changing before backward.
*/
void resetBwd(Argument& act) {
if (!needResetBwd_) {
return;
}
needResetBwd_ = false;
mkldnn::algorithm algo = getAlgo(this->getName());
float alpha = getBwdAlpha();
float beta = getBeta();
grad_ = MKLDNNMatrix::create(act.grad, val_->getPrimitiveDesc());
auto eng = CPUEngine::Instance().getEngine();
auto bwdDesc = eltwise_bwd::desc(
algo, grad_->getMemoryDesc(), val_->getMemoryDesc(), alpha, beta);
auto bwdPD = eltwise_bwd::primitive_desc(bwdDesc, eng, *fwdPD_);
CHECK(inVal_);
bwd_.reset(new eltwise_bwd(bwdPD, *inVal_, *grad_, *grad_));
pipelineBwd_.clear();
pipelineBwd_.push_back(*bwd_);
}
Error __must_check forward(Argument& act) {
resetFwd(act);
stream_->submit(pipelineFwd_);
return Error();
}
Error __must_check backward(Argument& act) {
resetBwd(act);
stream_->submit(pipelineBwd_);
return Error();
}
};
} // namespace paddle
......@@ -29,9 +29,9 @@ bool CudnnPoolLayer::typeCheck(const std::string &poolType,
if (mode) {
*mode = HL_POOLING_AVERAGE;
}
} else if (poolType == "cudnn-avg-excl-pad-pool") {
} else if (poolType == "cudnn-avg-incl-pad-pool") {
if (mode) {
*mode = HL_POOLING_AVERAGE_EXCLUDE_PADDING;
*mode = HL_POOLING_AVERAGE_INCLUDE_PADDING;
}
} else {
return false;
......
......@@ -143,7 +143,7 @@ void DetectionOutputLayer::forward(PassType passType) {
resetOutput(numKept, 7);
} else {
MatrixPtr outV = getOutputValue();
outV = NULL;
if (outV) outV->resize(0, 0);
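// resize(0, 0) clears the layer's actual output; the old `outV = NULL`
// only nulled this local copy of the pointer and left the stale result
// in place.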
return;
}
MatrixPtr outV = getOutputValue();
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ExpandConvBaseLayer.h"
#include "paddle/utils/Logging.h"
namespace paddle {
bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
/* Initialize the basic convolutional parent class */
ConvBaseLayer::init(layerMap, parameterMap);
int index = 0;
for (auto &inputConfig : config_.inputs()) {
const ConvConfig &conf = inputConfig.conv_conf();
/* Consistent caffe mode for multiple input */
caffeMode_ = conf.caffe_mode();
// create a new weight
size_t height, width;
height = filterPixels_[index] * filterChannels_[index];
width = (!isDeconv_) ? numFilters_ : channels_[index];
CHECK_EQ(parameters_[index]->getSize(), width * height);
Weight *w = new Weight(height, width, parameters_[index]);
weights_.emplace_back(w);
index++;
}
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ =
std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
}
}
getOutputSize();
return true;
}
size_t ExpandConvBaseLayer::getOutputSize() {
CHECK_NE(inputLayers_.size(), 0UL);
size_t layerSize = ConvBaseLayer::calOutputSize();
return layerSize;
}
void ExpandConvBaseLayer::addSharedBias() {
size_t mapW = getOutputSize() / numFilters_;
size_t mapH = getOutputValue()->getElementCnt() / mapW;
MatrixPtr out =
Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_);
Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
out->transpose(transOutValue_, false); // false means no memory allocation
transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
numFilters_);
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
transOutValue_->addBias(*bias, 1.0f);
transOutValue_->reshape(mapW, mapH);
transOutValue_->transpose(out, false); // false means no memory allocation
out->clear();
bias->clear();
}
void ExpandConvBaseLayer::addUnsharedBias() {
MatrixPtr outValue = getOutputValue();
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
outValue->addBias(*bias, 1.0f);
}
void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) {
size_t mapW = getOutputSize() / numFilters_;
size_t mapH = v->getElementCnt() / mapW;
MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_);
Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
vTmp->transpose(transOutValue_, false); // false means no memory allocation
transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
numFilters_);
biases->collectBias(*transOutValue_, 1.0f);
}
void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) {
MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
1,
biases_->getWGrad()->getElementCnt(),
false,
useGpu_);
if (sharedBiases_) {
bpropSharedBias(biases, v);
} else {
biases->collectBias(*v, 1.0f);
}
biases->clear();
}
} // namespace paddle
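For reference, the transpose/reshape dance in `addSharedBias` above amounts to broadcasting one bias value per filter across that filter's whole output map; a minimal sketch, assuming a hypothetical flat `[batch][numFilters][mapSize]` layout:

```c++
// Minimal sketch of the shared-bias addition, not the actual Matrix code.
void addSharedBiasNaive(float* out, const float* bias,
                        int batch, int numFilters, int mapSize) {
  for (int n = 0; n < batch; ++n)
    for (int f = 0; f < numFilters; ++f)
      for (int i = 0; i < mapSize; ++i)
        out[(n * numFilters + f) * mapSize + i] += bias[f];
}
```

The `ExpandConvLayer` changes below fold both the shared and unshared cases into a single `addBias(*biases_->getW(), 1.0, sharedBiases_)` call.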
......@@ -36,7 +36,36 @@ inline bool isDepthwiseConv(int channels, int groups) {
bool ExpandConvLayer::init(const LayerMap &layerMap,
const ParameterMap &parameterMap) {
/* Initialize the basic convolutional parent class */
ExpandConvBaseLayer::init(layerMap, parameterMap);
ConvBaseLayer::init(layerMap, parameterMap);
int index = 0;
for (auto &inputConfig : config_.inputs()) {
const ConvConfig &conf = inputConfig.conv_conf();
/* Consistent caffe mode for multiple input */
caffeMode_ = conf.caffe_mode();
// create a new weight
size_t height, width;
height = filterPixels_[index] * filterChannels_[index];
width = (!isDeconv_) ? numFilters_ : channels_[index];
CHECK_EQ(parameters_[index]->getSize(), width * height);
Weight *w = new Weight(height, width, parameters_[index]);
weights_.emplace_back(w);
index++;
}
if (biasParameter_.get()) {
if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ = std::unique_ptr<Weight>(
new Weight(1, numFilters_, biasParameter_, 0));
} else {
biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_, 0));
}
}
getOutputSize();
size_t numInputs = config_.inputs_size();
inputShape_.resize(numInputs);
......@@ -108,6 +137,12 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
return true;
}
size_t ExpandConvLayer::getOutputSize() {
CHECK_NE(inputLayers_.size(), 0UL);
size_t layerSize = ConvBaseLayer::calOutputSize();
return layerSize;
}
// i is the index of input layers
#define BACKWARD_INPUT(i, inputs, outputs) \
backward_[2 * i]->calc(inputs, outputs)
......@@ -155,11 +190,7 @@ void ExpandConvLayer::forward(PassType passType) {
/* add the bias-vector */
if (biases_.get()) {
if (sharedBiases_) {
addSharedBias();
} else {
addUnsharedBias();
}
output_.value->addBias(*biases_->getW(), 1.0, sharedBiases_);
}
/* activation */
......@@ -171,7 +202,7 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) {
MatrixPtr outGrad = getOutputGrad();
if (biases_ && biases_->getWGrad()) {
bpropBiases(outGrad);
biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBiases_);
/* Increasing the number of gradient */
biases_->getParameterPtr()->incUpdate(callback);
}
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once
#include <vector>
#include "ExpandConvBaseLayer.h"
#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
namespace paddle {
......@@ -28,10 +28,9 @@ namespace paddle {
* The config file api is img_conv_layer.
*/
class ExpandConvLayer : public ExpandConvBaseLayer {
class ExpandConvLayer : public ConvBaseLayer {
public:
explicit ExpandConvLayer(const LayerConfig& config)
: ExpandConvBaseLayer(config) {}
explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~ExpandConvLayer() {}
......@@ -41,6 +40,8 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
size_t getOutputSize();
protected:
std::vector<TensorShape> inputShape_;
std::vector<TensorShape> filterShape_;
......
......@@ -14,26 +14,12 @@ limitations under the License. */
#include "paddle/utils/Util.h"
#include "CostLayer.h"
#include "ValidationLayer.h"
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/Error.h"
#include "paddle/utils/Logging.h"
#include "AddtoLayer.h"
#include "CRFLayer.h"
#include "CosSimLayer.h"
#include "CostLayer.h"
#include "DataLayer.h"
#include "ExpandConvLayer.h"
#include "FullyConnectedLayer.h"
#include "HierarchicalSigmoidLayer.h"
#include "MaxLayer.h"
#include "MixedLayer.h"
#include "NormLayer.h"
#include "PoolLayer.h"
#include "TensorLayer.h"
#include "TransLayer.h"
#include "ValidationLayer.h"
DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
namespace paddle {
......@@ -109,6 +95,10 @@ ClassRegistrar<Layer, LayerConfig> Layer::registrar_;
LayerPtr Layer::create(const LayerConfig& config) {
std::string type = config.type();
// NOTE: As the following types contain the illegal character '-',
// they can not be registered with REGISTER_LAYER.
// Besides, to stay compatible with old training models,
// they can not use '_' instead.
if (type == "multi-class-cross-entropy")
return LayerPtr(new MultiClassCrossEntropy(config));
else if (type == "rank-cost")
......@@ -117,8 +107,6 @@ LayerPtr Layer::create(const LayerConfig& config) {
return LayerPtr(new AucValidation(config));
else if (type == "pnpair-validation")
return LayerPtr(new PnpairValidation(config));
// NOTE: stop adding "if" statements here.
// Instead, use REGISTER_LAYER to add more layer types
return LayerPtr(registrar_.createByType(config.type(), config));
}
......
......@@ -285,22 +285,18 @@ void MKLDNNConvLayer::resetWgtBiasValue(
wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();
bias = nullptr;
if (biases_ && biases_->getW()) {
bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc());
}
bias = (biases_ && biases_->getW())
? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc())
: nullptr;
}
void MKLDNNConvLayer::resetOutValue(
std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& out) {
out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc());
// change original output value from cpu matrix to mkldnn matrix
output_.value = std::dynamic_pointer_cast<Matrix>(out);
// create reorder if output value has cpu device and pd do not match
cpuOutVal_ = nullptr;
cvtOutVal_ = nullptr;
if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
......@@ -356,6 +352,7 @@ void MKLDNNConvLayer::resetBwdWgtPD(
void MKLDNNConvLayer::resetBwdDataPD(
std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
pd = nullptr;
if (inputLayers_[0]->getOutput().grad == nullptr) {
return;
}
......@@ -476,6 +473,7 @@ void MKLDNNConvLayer::resetWgtBiasGrad(
<< "primitive desc of weight grad and value should be equal";
VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();
bias = nullptr;
if (biasVal_ == nullptr) {
return;
}
......
......@@ -17,9 +17,6 @@ limitations under the License. */
using namespace mkldnn; // NOLINT
typedef memory::format format;
typedef inner_product_forward fc_fwd;
typedef inner_product_backward_weights fc_bwdWgt;
typedef inner_product_backward_data fc_bwdData;
namespace paddle {
......@@ -93,82 +90,146 @@ void MKLDNNFcLayer::reshape(
printSizeInfo();
}
void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
bool hasBias = biases_ && biases_->getW();
const MatrixPtr& wgtVal = weight_->getW();
const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
const MatrixPtr& outVal = output_.value;
resetFwdBuffers(in, wgt, bias, out);
resetFwdPD(fwdPD_, in, wgt, bias, out);
resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
printValueFormatFlow();
}
void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
resetBwdBuffers(in, wgt, bias, out);
resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
resetBwdDataPD(bwdDataPD, in, out);
resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
printGradFormatFlow();
}
void MKLDNNFcLayer::updateInputData() {
inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}
void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
}
}
void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetInValue(in);
resetWgtBiasValue(wgt, bias);
resetOutValue(out);
}
void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) {
if (inputIsOnlyMKLDNN()) {
const MatrixPtr& inVal = getInputValue(0);
in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
const MatrixPtr& dnnIn = getInputValue(0);
in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
CHECK(in) << "Input should be MKLDNNMatrix";
} else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
in = MKLDNNMatrix::create(
inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
}
in->downSpatial();
}
void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
wgt = MKLDNNMatrix::create(
wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_);
wgt->downSpatial();
bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
: nullptr;
out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);
// change original output value to mkldnn output value
output_.value = std::dynamic_pointer_cast<Matrix>(out);
bias = (biases_ && biases_->getW())
? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_)
: nullptr;
}
void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
if (!outputIsOnlyMKLDNN()) {
// the fc cpu output value does not need a conversion,
// just share the data pointer
getOutput(CPU_DEVICE).value->setData(output_.value->getData());
getOutput(CPU_DEVICE).value->setData(out->getData());
}
}
// create forward handle
void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr wgt,
MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out) {
CHECK(in);
CHECK(wgt);
CHECK(out);
prop_kind pk = prop_kind::forward;
fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
if (hasBias) {
fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_fwd::desc(pk,
in->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_));
}
void MKLDNNFcLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (bias) {
fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out));
} else {
fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out));
}
printValueFormatFlow();
pipeline.push_back(*fwd_);
}
void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (!needResetBwd_) {
return;
}
needResetBwd_ = false;
bool hasBias = biases_ && biases_->getWGrad();
void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetOutGrad(out);
resetWgtBiasGrad(wgt, bias);
/// backward weight
CHECK(inVal_) << "Should have input value";
const MatrixPtr& wgtGrad = weight_->getWGrad();
const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
resetInGrad(in);
}
void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
// TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
// for MKLDNN device:
......@@ -178,66 +239,88 @@ void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
// for CPU device:
// fc do not need to convert from cpu device since output is always nc format
// only need create from cpu device
const MatrixPtr& outGrad = getOutput(device).grad;
out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
: nullptr;
// create memory primitive desc
fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
fc_bwdWgt::desc bwdWgtDesc = hasBias
? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
fc_bwdWgt::primitive_desc bwdWgtPD =
fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
if (hasBias) {
bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
} else {
bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
CHECK(outVal_);
out =
MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc());
}
void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias) {
CHECK(wgtVal_);
wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
bias = nullptr;
if (biasVal_ == nullptr) {
return;
}
pipeline.push_back(*bwdWgt_);
bias =
MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc());
}
/// backward data
void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) {
in = nullptr;
const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
if (inGrad == nullptr) {
return;
}
if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
} else {
in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc());
fc_bwdData::primitive_desc bwdDataPD =
fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
// TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
CHECK(inVal_);
in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
CHECK(wgtVal_) << "Should have weight memory";
bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
printGradFormatFlow();
pipeline.push_back(*bwdData_);
void MKLDNNFcLayer::resetBwdWgtPD(
std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
CHECK(inVal_);
fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
bias->getMemoryDesc(),
out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgt->getMemoryDesc(),
out->getMemoryDesc());
pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
}
void MKLDNNFcLayer::updateInputData() {
inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
void MKLDNNFcLayer::resetBwdDataPD(
std::shared_ptr<fc_bwdData::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
pd = nullptr;
if (in == nullptr) {
return;
}
CHECK(wgtVal_);
fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc());
pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
}
void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
void MKLDNNFcLayer::resetBwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
CHECK(inVal_);
if (bias) {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
} else {
bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
}
pipeline.push_back(*bwdWgt_);
if (bwdDataPD == nullptr) {
return;
}
CHECK(wgtVal_) << "Should have weight memory";
bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in));
pipeline.push_back(*bwdData_);
}
} // namespace paddle
......@@ -18,6 +18,9 @@ limitations under the License. */
#include "mkldnn.hpp"
namespace paddle {
typedef mkldnn::inner_product_forward fc_fwd;
typedef mkldnn::inner_product_backward_weights fc_bwdWgt;
typedef mkldnn::inner_product_backward_data fc_bwdData;
/**
* @brief A subclass of MKLDNNLayer fc layer.
......@@ -32,6 +35,9 @@ protected:
// if has already init the weight
bool hasInitedWgt_;
// save forward primitive_desc, which can be used backward
std::shared_ptr<fc_fwd::primitive_desc> fwdPD_;
// fc weight and bias
std::unique_ptr<Weight> weight_;
std::unique_ptr<Weight> biases_;
......@@ -67,6 +73,59 @@ public:
void convertWeightsFromPaddle() override;
void convertWeightsToPaddle() override;
protected:
/**
* Forward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetInValue(MKLDNNMatrixPtr& in);
void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
void resetOutValue(MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr wgt,
MKLDNNMatrixPtr bias,
MKLDNNMatrixPtr out);
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<fc_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, output, weight and bias),
* reset primitive descriptor for backward weight,
* reset primitive descriptor for backward data,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetOutGrad(MKLDNNMatrixPtr& out);
void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
void resetInGrad(MKLDNNMatrixPtr& in);
void resetBwdWgtPD(std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
void resetBwdDataPD(std::shared_ptr<fc_bwdData::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out);
};
} // namespace paddle
......@@ -119,6 +119,10 @@ public:
inputElemenCnt_ = elemenCnt;
reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
if (outVal_) {
// change original output value to mkldnn output value
output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
}
convertWeightsFromPaddle();
needResetBwd_ = true;
}
......
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MKLDNNPoolLayer.h"
#include "paddle/math/MathUtils.h"
#include "paddle/utils/Logging.h"
using namespace mkldnn; // NOLINT
typedef memory::format format;
namespace paddle {
REGISTER_LAYER(mkldnn_pool, MKLDNNPoolLayer);
bool MKLDNNPoolLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
if (!MKLDNNLayer::init(layerMap, parameterMap)) {
return false;
}
/* the size of inputs for pool-layer is 1 */
CHECK_EQ(config_.inputs_size(), 1);
const PoolConfig& conf = config_.inputs(0).pool_conf();
ic_ = conf.channels();
ih_ = conf.img_size_y();
iw_ = conf.img_size();
oc_ = ic_;
oh_ = conf.output_y();
ow_ = conf.output_x();
fh_ = conf.size_y();
fw_ = conf.size_x();
ph_ = conf.padding_y();
pw_ = conf.padding();
sh_ = conf.stride_y();
sw_ = conf.stride();
const std::string& type = conf.pool_type();
if (type == "max-projection") {
poolAlgo_ = algorithm::pooling_max;
} else if (type == "avg-projection") {
// paddle only uses exclude_padding
poolAlgo_ = algorithm::pooling_avg_exclude_padding;
} else {
LOG(FATAL) << "unknown pooling type!";
}
return true;
}
void MKLDNNPoolLayer::reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
reshapeInput(bs, ih, iw);
// ic_ and oc can not be changed
CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
<< "Input channel can not be changed";
// calculate output sizes
// paddle uses caffeMode=false for pooling
oh = outputSize(ih, fh_, ph_, sh_, false);
ow = outputSize(iw, fw_, pw_, sw_, false);
reshapeOutput(oh, ow);
resizeOutput(bs, oc * oh * ow);
printSizeInfo();
}
void MKLDNNPoolLayer::resetFwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
resetFwdBuffers(in, out);
resetFwdPD(fwdPD_, in, out);
resetFwdPipeline(pipeline, fwdPD_, in, out);
printValueFormatFlow();
}
void MKLDNNPoolLayer::resetBwd(std::vector<primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
std::shared_ptr<pool_bwd::primitive_desc> pd;
resetBwdBuffers(in, out);
resetBwdPD(pd, in, out);
resetBwdPipeline(pipeline, pd, in, out);
printGradFormatFlow();
}
void MKLDNNPoolLayer::updateInputData() {
inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}
void MKLDNNPoolLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
resetInValue(in);
resetOutValue(out);
}
void MKLDNNPoolLayer::resetInValue(MKLDNNMatrixPtr& in) {
if (inputIsOnlyMKLDNN()) {
const MatrixPtr& dnnIn = getInputValue(0);
in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
CHECK(in) << "Input should be MKLDNNMatrix";
} else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
in = MKLDNNMatrix::create(
cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
}
}
void MKLDNNPoolLayer::resetOutValue(MKLDNNMatrixPtr& out) {
CHECK(inVal_) << "Should reset input value first";
memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
out = MKLDNNMatrix::create(
output_.value, outDims, inVal_->getFormat(), engine_);
// create reorder if output value has cpu device and pd do not match
cpuOutVal_ = nullptr;
cvtOutVal_ = nullptr;
if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
CHECK(cvtOutVal_) << "should not be empty";
} else {
// CPU output shares the same data as the MKLDNN output
cpuOut->setData(out->getData());
cpuOutVal_ = out;
}
}
}
void MKLDNNPoolLayer::resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr out) {
memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
memory::dims kernels = memory::dims{fh_, fw_};
memory::dims strides = memory::dims{sh_, sw_};
memory::dims padL = memory::dims{ph_, pw_};
memory::dims padR = getPaddingR();
padding_kind padKind = padding_kind::zero;
prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
: prop_kind::forward_training;
auto fwdDesc = pool_fwd::desc(pk,
poolAlgo_,
in->getMemoryDesc(),
out->getMemoryDesc(),
strides,
kernels,
padL,
padR,
padKind);
pd.reset(new pool_fwd::primitive_desc(fwdDesc, engine_));
// prepare workspace if necessary
workspace_ =
(passType_ != PASS_TEST && poolAlgo_ == algorithm::pooling_max)
? std::make_shared<memory>(memory(pd->workspace_primitive_desc()))
: nullptr;
}
void MKLDNNPoolLayer::resetFwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
pipeline.clear();
fwd_ = workspace_
? std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out, *workspace_))
: std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out));
pipeline.push_back(*fwd_);
if (cvtOutVal_) {
pipeline.push_back(*cvtOutVal_);
}
}
void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
resetOutGrad(out);
resetInGrad(in);
}
void MKLDNNPoolLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
CHECK(outVal_) << "Should have output value";
out = MKLDNNMatrix::create(output_.grad, outVal_->getPrimitiveDesc());
// create a reorder if the output grad is on the cpu device and the pds do not match
cpuOutGrad_ = nullptr;
cvtOutGrad_ = nullptr;
if (!outputIsOnlyMKLDNN()) {
const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
cpuOutGrad_ = MKLDNNMatrix::create(
cpuOut, memory::dims{bs_, oc_, oh_, ow_}, format::nchw, engine_);
if (cpuOutGrad_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
CHECK(cvtOutGrad_) << "should not be empty";
} else {
// share the same data as the CPU output
output_.grad->setData(cpuOut->getData());
out = cpuOutGrad_;
}
}
}
void MKLDNNPoolLayer::resetInGrad(MKLDNNMatrixPtr& in) {
in = nullptr;
const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
if (inGrad == nullptr) {
return;
}
CHECK(inVal_);
in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
memory::dims kernels = memory::dims{fh_, fw_};
memory::dims strides = memory::dims{sh_, sw_};
memory::dims padL = memory::dims{ph_, pw_};
memory::dims padR = getPaddingR();
CHECK(in);
CHECK(out);
auto bwdDesc = pool_bwd::desc(poolAlgo_,
in->getMemoryDesc(),
out->getMemoryDesc(),
strides,
kernels,
padL,
padR,
padding_kind::zero);
pd.reset(new pool_bwd::primitive_desc(bwdDesc, engine_, *fwdPD_));
}
void MKLDNNPoolLayer::resetBwdPipeline(
std::vector<primitive>& pipeline,
std::shared_ptr<pool_bwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (cvtOutGrad_) {
pipeline.push_back(*cvtOutGrad_);
}
bwdData_ =
workspace_
? std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *workspace_, *in))
: std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *in));
pipeline.push_back(*bwdData_);
}
} // namespace paddle
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "MKLDNNLayer.h"
#include "mkldnn.hpp"
namespace paddle {
typedef mkldnn::pooling_forward pool_fwd;
typedef mkldnn::pooling_backward pool_bwd;
/**
* @brief A subclass of MKLDNNLayer pool layer.
*
* The config file api is mkldnn_pool
*/
class MKLDNNPoolLayer : public MKLDNNLayer {
protected:
// padding height and width
int ph_, pw_;
// stride height and width
int sh_, sw_;
// filter (kernel) height and width
int fh_, fw_;
// pooling_avg or pooling_max
mkldnn::algorithm poolAlgo_;
// MKLDNNMatrixPtr which should be created from CPU Device
MKLDNNMatrixPtr cpuOutVal_;
MKLDNNMatrixPtr cpuOutGrad_;
// convert handle between CPU device and MKLDNN device
std::shared_ptr<mkldnn::reorder> cvtOutVal_;
std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
// save forward primitive_desc, which can be used backward
std::shared_ptr<pool_fwd::primitive_desc> fwdPD_;
// according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
// test_pooling_forward.cpp, pooling needs a workspace for backward
std::shared_ptr<mkldnn::memory> workspace_;
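// For pooling_max, the workspace records which input element produced
// each output maximum so that backward can route the gradient there;
// average pooling does not need it.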
public:
explicit MKLDNNPoolLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
~MKLDNNPoolLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void updateInputData() override;
void printSizeInfo() override {
MKLDNNLayer::printSizeInfo();
VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
<< ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
<< ", sw: " << sw_;
}
protected:
/**
* Forward functions: reset buffers(input, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
void resetInValue(MKLDNNMatrixPtr& in);
void resetOutValue(MKLDNNMatrixPtr& out);
void resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr in,
MKLDNNMatrixPtr out);
void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<pool_fwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
/**
* Backward functions: reset buffers(input, output),
* reset primitive descriptor,
* reset pipeline.
*/
void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
void resetOutGrad(MKLDNNMatrixPtr& out);
void resetInGrad(MKLDNNMatrixPtr& in);
void resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
std::shared_ptr<pool_bwd::primitive_desc>& pd,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& out);
/**
* get padding_r according to
* https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
* test_pooling_forward.cpp
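* Worked check (hypothetical numbers): with ih_ = 4, fh_ = 3, ph_ = 0,
* sh_ = 2 and oh_ = 2, padR[0] starts at 0 and (4+0+0-3)/2+1 = 1 < 2,
* so it is bumped to 1; then (4+0+1-3)/2+1 = 2 >= 2 and the stretched
* window set covers the whole output.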
*/
mkldnn::memory::dims getPaddingR() const {
mkldnn::memory::dims padR = {ph_, pw_};
for (int i = 0; i < 2; ++i) {
if ((ih_ + ph_ + padR[0] - fh_) / sh_ + 1 < oh_) {
++padR[0];
}
if ((iw_ + pw_ + padR[1] - fw_) / sw_ + 1 < ow_) {
++padR[1];
}
}
return padR;
}
};
} // namespace paddle
......@@ -73,9 +73,10 @@ void SequenceSliceLayer::checkInputs() {
CHECK(inputSeq.hasSeq()) << "The first input of sequence slice layer "
<< "must be a sequence.";
const MatrixPtr indices1 = getInputValue(1);
CHECK_EQ(static_cast<size_t>(indices1->getHeight()),
inputSeq.hasSubseq() ? inputSeq.getNumSubSequences()
: inputSeq.getNumSequences())
CHECK_EQ(
indices1->getHeight(),
static_cast<size_t>(inputSeq.hasSubseq() ? inputSeq.getNumSubSequences()
: inputSeq.getNumSequences()))
<< "Height of the second input should be equal to number of sequence "
<< "in the first input.";
if (inputLayers_.size() == 3) {
......@@ -151,7 +152,7 @@ void SequenceSliceLayer::calSelectedRows(const MatrixPtr starts,
if (ends) endPos = inputSeqInfoVec_[i][j] + ends->getElement(rowIdx, k);
int seqLen = endPos - begPos + 1;
CHECK_GT(seqLen, 0U);
CHECK_GT(seqLen, 0);
for (int m = begPos; m <= endPos; ++m) selectedRows_.push_back(m);
hasSubseq
? outSubSeqStartPos_.push_back(outSubSeqStartPos_.back() + seqLen)
......
......@@ -64,15 +64,17 @@ void MKLDNNTester::reset(const TestConfig& dnn,
configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
}
refLayer_ = testLayers_[REF];
dnnLayer_ = std::dynamic_pointer_cast<MKLDNNLayer>(testLayers_[DNN]);
CHECK(dnnLayer_);
// for comparison with Paddle reference results,
// need manually add cpu device output for test
dnnLayer_->addOutputArgument(CPU_DEVICE);
dnnLayer_ = testLayers_[DNN];
EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
setInputImgSize();
// for comparison with Paddle reference results,
// we need to manually add a cpu device output for the test
MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
if (dnnLayer) {
dnnLayer->addOutputArgument(CPU_DEVICE);
}
}
void MKLDNNTester::setInputImgSize() {
......@@ -122,7 +124,7 @@ void MKLDNNTester::randomTopDiffs() {
void MKLDNNTester::checkForward() {
VLOG(MKLDNN_ALL) << "Check Forward";
printTopDatas();
double delta = compareMatrix(dnnLayer_->getOutput(-1).value,
double delta = compareMatrix(dnnLayer_->getOutput(CPU_DEVICE).value,
refLayer_->getOutputValue());
EXPECT_LE(fabs(delta), eps_);
}
......@@ -155,7 +157,10 @@ void MKLDNNTester::checkBackwardWgts() {
vector<VectorPtr> dnnWgts; // used to temporarily save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts);
dnnLayer_->convertWeightsToPaddle();
MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
if (dnnLayer) {
dnnLayer->convertWeightsToPaddle();
}
for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
......@@ -322,6 +327,10 @@ void MKLDNNTester::runOnce() {
// and clearTopDatas(REF) should be covered by ref layers
clearBotDiffs(REF);
clearWgtDiffs(REF);
// it is necessary to clear bottom diffs when only the activation is of dnn type
if (configs_[DNN].layerConfig.active_type().compare(0, 7, "mkldnn_") == 0) {
clearBotDiffs(DNN);
}
}
void MKLDNNTester::run(const TestConfig& dnn,
......@@ -333,8 +342,19 @@ void MKLDNNTester::run(const TestConfig& dnn,
float epsilon,
bool log,
int level) {
VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: " << dnn.layerConfig.type()
<< " vs " << ref.layerConfig.type();
CHECK(dnn.layerConfig.type().compare(0, 7, "mkldnn_") == 0 ||
dnn.layerConfig.active_type().compare(0, 7, "mkldnn_") == 0)
<< "should be MKLDNN layer or MKLDNN activation";
if (dnn.layerConfig.type() == ref.layerConfig.type()) {
VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
<< dnn.layerConfig.active_type() << " vs "
<< ref.layerConfig.active_type();
} else {
VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
<< dnn.layerConfig.type() << " vs "
<< ref.layerConfig.type();
}
ih_ = inputImgH;
iw_ = inputImgW;
iter_ = iter;
......
......@@ -41,8 +41,7 @@ protected:
vector<LayerMap> layerMaps_;
vector<vector<ParameterPtr>> parameters_;
vector<LayerPtr> testLayers_;
LayerPtr refLayer_;
MKLDNNLayerPtr dnnLayer_;
LayerPtr refLayer_, dnnLayer_;
/// run some iterations; all the results should pass
size_t iter_;
......
......@@ -17,6 +17,7 @@ limitations under the License. */
#include <vector>
#include "MKLDNNTester.h"
#include "ModelConfig.pb.h"
#include "paddle/gserver/activations/MKLDNNActivation.h"
#include "paddle/math/MathUtils.h"
using namespace paddle; // NOLINT
......@@ -141,6 +142,110 @@ TEST(MKLDNNLayer, ConvLayer) {
testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1});
}
struct testPoolDesc {
int bs, ch; // input channel and output channel are the same
int ih, iw;
int oh, ow;
int fh, fw;
int ph, pw;
int sh, sw;
};
void testPoolLayer(const testPoolDesc& pm) {
const std::string compareTypes[] = {"mkldnn_pool", "pool"};
TestConfig cfg;
cfg.layerConfig.set_type(compareTypes[0]);
cfg.layerConfig.set_size(pm.ch * pm.oh * pm.ow);
cfg.inputDefs.push_back(
{INPUT_DATA,
"layer_0",
/* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
0});
LayerInputConfig* input = cfg.layerConfig.add_inputs();
PoolConfig* pool = input->mutable_pool_conf();
pool->set_channels(pm.ch);
pool->set_img_size(pm.iw);
pool->set_img_size_y(pm.ih);
pool->set_output_x(pm.ow);
pool->set_output_y(pm.oh);
pool->set_size_x(pm.fw);
pool->set_size_y(pm.fh);
pool->set_padding(pm.pw);
pool->set_padding_y(pm.ph);
pool->set_stride(pm.sw);
pool->set_stride_y(pm.sh);
int oh = outputSize(pm.ih, pm.fh, pm.ph, pm.sh, false);
int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
CHECK_EQ(ow, pm.ow) << "output size check failed";
CHECK_EQ(oh, pm.oh) << "output size check failed";
MKLDNNTester tester;
for (auto type : {"max-projection", "avg-projection"}) {
pool->set_pool_type(type);
TestConfig ref = cfg;
ref.layerConfig.set_type(compareTypes[1]);
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
}
}
TEST(MKLDNNLayer, PoolLayer) {
/* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw*/
testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
testPoolLayer({8, 16, 56, 56, 28, 28, 3, 3, 0, 0, 2, 2});
testPoolLayer({8, 16, 14, 14, 7, 7, 3, 3, 0, 0, 2, 2});
testPoolLayer({4, 16, 7, 7, 1, 1, 7, 7, 0, 0, 1, 1});
testPoolLayer({4, 2, 5, 5, 3, 3, 5, 5, 1, 1, 1, 1});
testPoolLayer({2, 8, 56, 56, 29, 29, 3, 3, 1, 1, 2, 2});
}
struct testActDesc {
int bs, ch;
int ih, iw;
};
static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
cfg.biasSize = 0;
cfg.layerConfig.set_type("addto");
cfg.layerConfig.set_size(pm.ch * pm.ih * pm.iw);
cfg.inputDefs.push_back(
{INPUT_DATA,
"layer_0",
/* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
0});
cfg.layerConfig.add_inputs();
}
void testActivation(std::string& type, const testActDesc& pm) {
const std::string compareTypes[] = {type, type.erase(0, 7)};
TestConfig cfg;
getAddtoConfig(cfg, pm);
TestConfig ref = cfg;
cfg.layerConfig.set_active_type(compareTypes[0]);
ref.layerConfig.set_active_type(compareTypes[1]);
MKLDNNTester tester;
for (auto bs : {pm.bs, 1}) {
tester.run(cfg, ref, bs, pm.ih, pm.iw);
}
}
TEST(MKLDNNActivation, Activations) {
auto types = MKLDNNActivation::getAllRegisteredTypes();
// TODO(TJ): mkldnn_softmax is not implemented yet, and paddle does not have an elu activation
std::set<string> excluded{"mkldnn_softmax", "mkldnn_elu"};
for (auto type : types) {
if (excluded.count(type)) {
continue;
}
testActivation(type, {16, 64, 32, 32});
}
}
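// A minimal sketch of the naming convention the tests above rely on: every
// MKL-DNN activation type is its CPU counterpart's name with an "mkldnn_"
// prefix, so the reference type is recovered by stripping the first 7 chars.
// (refActTypeOf is a hypothetical helper, not part of the test file.)
#include <cassert>
#include <string>

static std::string refActTypeOf(const std::string& dnnType) {
  assert(dnnType.compare(0, 7, "mkldnn_") == 0);
  return dnnType.substr(7);  // e.g. "mkldnn_relu" -> "relu"
}

int main() {
  assert(refActTypeOf("mkldnn_tanh") == "tanh");
  return 0;
}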
// TODO(TJ): add branch test
int main(int argc, char** argv) {
......
......@@ -17,6 +17,7 @@ limitations under the License. */
#include <cmath>
#include "BaseMatrix.h"
#include "MathFunctions.h"
#include "NEONFunctions.h"
#include "SIMDFunctions.h"
#include "hl_matrix_apply.cuh"
#include "hl_matrix_base.cuh"
......@@ -666,6 +667,13 @@ void BaseMatrixT<T>::relu(BaseMatrixT& b) {
applyBinary(binary::Relu<T>(), b);
}
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <>
void BaseMatrixT<float>::relu(BaseMatrixT& b) {
neon::relu(data_, b.data_, height_ * width_);
}
#endif
DEFINE_MATRIX_BINARY_OP(ReluDerivative, a *= (b > 0.0f ? 1.0f : 0.0f));
template <class T>
void BaseMatrixT<T>::reluDerivative(BaseMatrixT& b) {
......
......@@ -66,11 +66,12 @@ public:
/**
* Create reorder primitive.
* Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst.
* checkData: for whether to check the data handle of src and dst is the same.
* if true, means check it and do not want support inplace reorder;
* otherwise do not check data which means the created reorder
* maybe inplace buffer and do not guarantee the logical is correct
* since not all format or conversion support inplace.
* checkData: whether to check the data handles of src and dst.
*            If true, the handles are checked and must not be equal;
*            otherwise they are not checked, so the created reorder
*            may operate on an in-place buffer. Do not pass false
*            unless you can guarantee that in-place logic works for
*            your reorder, since not every format or conversion
*            supports in-place conversion.
*/
static std::shared_ptr<mkldnn::reorder> createReorder(
const MKLDNNMatrixPtr& src,
......
......@@ -26,7 +26,7 @@ limitations under the License. */
#include <mkl_lapacke.h>
#endif
#ifdef PADDLE_USE_ATLAS
#if defined(PADDLE_USE_ATLAS) || defined(PADDLE_USE_VECLIB)
extern "C" {
#include <cblas.h>
#include <clapack.h>
......
......@@ -1033,17 +1033,15 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
size_t width = imgSizeW;
size_t height = imgSizeH;
CHECK(height * width * channels == inputMat.getWidth());
CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputH * outputW * channels);
hl_maxpool_forward(frameNum,
inputData,
channels,
height,
width,
imgSizeH,
imgSizeW,
outputH,
outputW,
sizeX,
......@@ -1080,11 +1078,8 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat,
real* outDiff = outGrad.getData();
size_t frameNum = inputMat.getHeight();
size_t channels = outV.getWidth() / outputH / outputW;
size_t width = imgSizeW;
size_t height = imgSizeH;
CHECK(height * width * channels == inputMat.getWidth());
CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == width * height * channels);
CHECK(outGrad.getHeight() == outV.getHeight() &&
outGrad.getWidth() == outV.getWidth());
......@@ -1093,8 +1088,8 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat,
outData,
outDiff,
channels,
height,
width,
imgSizeH,
imgSizeW,
outputH,
outputW,
sizeX,
......@@ -1125,17 +1120,15 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
size_t height = imgSizeH;
size_t width = imgSizeW;
CHECK(height * width * channels == inputMat.getWidth());
CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputH * outputW * channels);
hl_avgpool_forward(frameNum,
inputData,
channels,
height,
width,
imgSizeH,
imgSizeW,
outputH,
outputW,
sizeX,
......@@ -1166,17 +1159,15 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad,
real* outDiff = outGrad.getData();
size_t frameNum = outGrad.getHeight();
size_t channels = outGrad.getWidth() / outputH / outputW;
size_t height = imgSizeH;
size_t width = imgSizeW;
CHECK(height * width * channels == width_);
CHECK(imgSizeH * imgSizeW * channels == width_);
CHECK(height_ == outGrad.getHeight());
CHECK(outGrad.getWidth() == outputH * outputW * channels);
hl_avgpool_backward(frameNum,
outDiff,
channels,
height,
width,
imgSizeH,
imgSizeW,
outputH,
outputW,
sizeX,
......@@ -1214,19 +1205,16 @@ void GpuMatrix::maxPool3DForward(Matrix& inputMat,
real* inputData = inputMat.getData();
real* maxPoolIdxData = maxPoolIdx.getData();
size_t num = inputMat.getHeight();
size_t width = imgSizeW;
size_t height = imgSizeH;
size_t depth = imgSizeD;
CHECK(depth * height * width * channels == inputMat.getWidth());
CHECK(imgSizeD * imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputD * outputH * outputW * channels);
hl_maxpool3D_forward(num,
inputData,
channels,
depth,
height,
width,
imgSizeD,
imgSizeH,
imgSizeW,
outputD,
outputH,
outputW,
......@@ -1269,20 +1257,16 @@ void GpuMatrix::maxPool3DBackward(Matrix& outGrad,
real* maxPoolIdxData = maxPoolIdx.getData();
size_t frameNum = getHeight();
size_t channels = outGrad.getWidth() / outputD / outputH / outputW;
size_t width = imgSizeW;
size_t height = imgSizeH;
size_t depth = imgSizeD;
CHECK(depth * height * width * channels == getWidth());
CHECK(width_ == depth * width * height * channels);
CHECK(imgSizeD * imgSizeH * imgSizeW * channels == getWidth());
CHECK(outGrad.getHeight() == maxPoolIdx.getHeight() &&
outGrad.getWidth() == maxPoolIdx.getWidth());
hl_maxpool3D_backward(frameNum,
outDiff,
channels,
depth,
height,
width,
imgSizeD,
imgSizeH,
imgSizeW,
outputD,
outputH,
outputW,
......@@ -1323,19 +1307,16 @@ void GpuMatrix::avgPool3DForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
size_t height = imgSizeH;
size_t width = imgSizeW;
size_t depth = imgSizeD;
CHECK(depth * height * width * channels == inputMat.getWidth());
CHECK(imgSizeD * imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputD * outputH * outputW * channels);
hl_avgpool3D_forward(frameNum,
inputData,
channels,
depth,
height,
width,
imgSizeD,
imgSizeH,
imgSizeW,
outputD,
outputH,
outputW,
......@@ -1375,19 +1356,16 @@ void GpuMatrix::avgPool3DBackward(Matrix& outGrad,
real* outDiff = outGrad.getData();
size_t frameNum = outGrad.getHeight();
size_t channels = outGrad.getWidth() / outputD / outputH / outputW;
size_t height = imgSizeH;
size_t width = imgSizeW;
size_t depth = imgSizeD;
CHECK(depth * height * width * channels == width_);
CHECK(imgSizeD * imgSizeH * imgSizeW * channels == width_);
CHECK(height_ == outGrad.getHeight());
CHECK(outGrad.getWidth() == outputD * outputH * outputW * channels);
hl_avgpool3D_backward(frameNum,
outDiff,
channels,
depth,
height,
width,
imgSizeD,
imgSizeH,
imgSizeW,
outputD,
outputH,
outputW,
......@@ -1999,11 +1977,11 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
real* outData = data_;
size_t num = inputMat.getHeight();
size_t inWidth = imgSizeW;
size_t inHeight = imgSizeH;
CHECK(inHeight * inWidth == inputMat.getWidth() / channels);
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
CHECK(inLength == inputMat.getWidth() / channels);
CHECK_EQ(num, this->getHeight());
CHECK_EQ(channels * outputH * outputW, this->getWidth());
CHECK_EQ(channels * outLength, this->getWidth());
size_t outStride = getStride();
/* initialize the data_ */
......@@ -2020,24 +1998,24 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
}
for (size_t c = 0; c < channels; ++c) { // channel by channel
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int hend = std::min(hstart + sizeY, inHeight);
int wend = std::min(wstart + sizeX, inWidth);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
outData[ph * outputW + pw] = std::max(outData[ph * outputW + pw],
inputData[h * inWidth + w]);
outData[ph * outputW + pw] = std::max(
outData[ph * outputW + pw], inputData[h * imgSizeW + w]);
}
}
}
}
// compute offset
inputData += inHeight * inWidth;
outData += outputH * outputW;
inputData += inLength;
outData += outLength;
}
}
}
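// A minimal sketch of the window-clamping idiom hoisted out of the inner loop
// above (poolWindow is a hypothetical helper): for one output index p along a
// single axis, compute the half-open input window [start, end) restricted to
// the image.
#include <algorithm>
#include <cassert>

static void poolWindow(int p, int stride, int pad, int ksize, int imgSize,
                       int* start, int* end) {
  *start = p * stride - pad;
  *end = std::min(*start + ksize, imgSize);
  *start = std::max(*start, 0);
}

int main() {
  int s, e;
  poolWindow(/*p=*/0, /*stride=*/2, /*pad=*/1, /*ksize=*/3, /*imgSize=*/5, &s, &e);
  assert(s == 0 && e == 2);  // window [-1, 2) clamped to the image
  return 0;
}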
......@@ -2058,8 +2036,10 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
size_t paddingH,
size_t paddingW) {
size_t num = image.getHeight();
size_t channels = size_t(width_ / imgSizeH / imgSizeW);
CHECK(image.getWidth() == imgSizeH * imgSizeW * channels);
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
size_t channels = size_t(width_ / inLength);
CHECK(image.getWidth() == inLength * channels);
CHECK(image.getHeight() == height_ && image.getWidth() == width_);
CHECK(outV.getHeight() == outGrad.getHeight() &&
outV.getWidth() == outGrad.getWidth());
......@@ -2080,12 +2060,12 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int hend = std::min(hstart + sizeY, imgSizeH);
int wend = std::min(wstart + sizeX, imgSizeW);
hstart = std::max(hstart, 0);
wstart = std::max(wstart, 0);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
......@@ -2098,10 +2078,10 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
}
}
// offset
inData += imgSizeH * imgSizeW;
tgtGrad += imgSizeH * imgSizeW;
otData += outputH * outputW;
otGrad += outputH * outputW;
inData += inLength;
tgtGrad += inLength;
otData += outLength;
otGrad += outLength;
}
}
}
......@@ -2120,10 +2100,10 @@ void CpuMatrix::avgPoolForward(Matrix& input,
size_t paddingW) {
// The main loop
size_t num = input.getHeight();
size_t inHeight = imgSizeH;
size_t inWidth = imgSizeW;
CHECK(inHeight * inWidth * channels == input.getWidth());
CHECK(outputH * outputW * channels * num == height_ * width_);
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
CHECK(inLength * channels == input.getWidth());
CHECK(outLength * channels * num == height_ * width_);
real* tgtData = data_;
real* inData = input.getData();
......@@ -2133,30 +2113,27 @@ void CpuMatrix::avgPoolForward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int hend = std::min(hstart + sizeY, inHeight + paddingH);
int wend = std::min(wstart + sizeX, inWidth + paddingW);
int poolSize = (hend - hstart) * (wend - wstart);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
hend = std::min(hend, static_cast<int>(inHeight));
wend = std::min(wend, static_cast<int>(inWidth));
CHECK(poolSize);
tgtData[ph * outputW + pw] = 0; // clear
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
tgtData[ph * outputW + pw] += inData[h * inWidth + w];
tgtData[ph * outputW + pw] += inData[h * imgSizeW + w];
}
}
int poolSize = (hend - hstart) * (wend - wstart);
CHECK(poolSize);
tgtData[ph * outputW + pw] /= poolSize;
}
}
// compute offset
inData += inHeight * inWidth;
tgtData += outputH * outputW;
inData += inLength;
tgtData += outLength;
}
}
}
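// A minimal sketch of the divisor semantics in the rewritten avgPoolForward
// above: poolSize is now computed after clamping the window to the image, so
// it counts only in-image elements (exclusive padding) rather than padded
// positions.
#include <algorithm>
#include <cassert>

int main() {
  int imgSize = 5, ksize = 3, pad = 1, stride = 2, p = 0;
  int start = p * stride - pad;                // -1
  int end = std::min(start + ksize, imgSize);  // 2
  start = std::max(start, 0);                  // 0
  int poolSize = end - start;                  // 2: the padded cell no longer
  assert(poolSize == 2);                       // inflates the average divisor
  return 0;
}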
......@@ -2176,7 +2153,9 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
size_t paddingW) {
size_t num = input.getHeight();
size_t channels = input.getWidth() / outputH / outputW;
CHECK(imgSizeH * imgSizeW * channels == getWidth());
size_t inLength = imgSizeH * imgSizeW;
size_t outLength = outputH * outputW;
CHECK(inLength * channels == getWidth());
real* inData = input.getData();
real* outData = getData();
......@@ -2186,16 +2165,14 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int hend = std::min(hstart + sizeY, imgSizeH + paddingH);
int wend = std::min(wstart + sizeX, imgSizeW + paddingW);
int poolSize = (hend - hstart) * (wend - wstart);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
hend = std::min(hend, static_cast<int>(imgSizeH));
wend = std::min(wend, static_cast<int>(imgSizeW));
int poolSize = (hend - hstart) * (wend - wstart);
CHECK(poolSize);
for (int h = hstart; h < hend; ++h) {
......@@ -2206,8 +2183,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
}
}
// offset
outData += imgSizeH * imgSizeW;
inData += outputH * outputW;
outData += inLength;
inData += outLength;
}
}
}
......@@ -2234,12 +2211,11 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
real* outData = getData();
real* maxPoolIdxData = maxPoolIdx.getData();
size_t num = inputMat.getHeight();
size_t inWidth = imgSizeW;
size_t inHeight = imgSizeH;
size_t inDepth = imgSizeD;
CHECK(inHeight * inWidth * inDepth == inputMat.getWidth() / channels);
size_t inLength = imgSizeH * imgSizeW * imgSizeD;
size_t outLength = outputH * outputW * outputD;
CHECK(inLength == inputMat.getWidth() / channels);
CHECK_EQ(num, this->getHeight());
CHECK_EQ(channels * outputH * outputW * outputD, this->getWidth());
CHECK_EQ(channels * outLength, this->getWidth());
size_t outStride = getStride();
/* initialize the data_ */
......@@ -2258,16 +2234,16 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
}
for (size_t c = 0; c < channels; ++c) { // channel by channel
for (size_t pd = 0; pd < outputD; ++pd) {
int dstart = pd * strideD - paddingD;
int dend = std::min(dstart + sizeZ, imgSizeD);
dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int dstart = pd * strideD - paddingD;
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int dend = std::min(dstart + sizeZ, inDepth);
int hend = std::min(hstart + sizeY, inHeight);
int wend = std::min(wstart + sizeX, inWidth);
dstart = std::max(dstart, 0);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
int maxIdx = -1;
real maxOutData = outData[(pd * outputH + ph) * outputW + pw];
......@@ -2275,9 +2251,9 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (maxOutData <
inputData[(d * inHeight + h) * inWidth + w]) {
maxOutData = inputData[(d * inHeight + h) * inWidth + w];
maxIdx = (d * inHeight + h) * inWidth + w;
inputData[(d * imgSizeH + h) * imgSizeW + w]) {
maxOutData = inputData[(d * imgSizeH + h) * imgSizeW + w];
maxIdx = (d * imgSizeH + h) * imgSizeW + w;
}
}
}
......@@ -2288,9 +2264,9 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
}
}
// compute offset
inputData += inDepth * inHeight * inWidth;
outData += outputD * outputH * outputW;
maxPoolIdxData += outputD * outputH * outputW;
inputData += inLength;
outData += outLength;
maxPoolIdxData += outLength;
}
}
}
......@@ -2315,7 +2291,9 @@ void CpuMatrix::maxPool3DBackward(Matrix& outGrad,
real scaleTargets,
real scaleOutput) {
size_t num = getHeight();
size_t channels = size_t(width_ / imgSizeD / imgSizeH / imgSizeW);
size_t inLength = imgSizeH * imgSizeW * imgSizeD;
size_t outLength = outputH * outputW * outputD;
size_t channels = size_t(width_ / inLength);
CHECK(maxPoolIdx.getHeight() == outGrad.getHeight() &&
maxPoolIdx.getWidth() == outGrad.getWidth());
......@@ -2341,9 +2319,9 @@ void CpuMatrix::maxPool3DBackward(Matrix& outGrad,
}
}
// offset
tgtGrad += imgSizeD * imgSizeH * imgSizeW;
otGrad += outputD * outputH * outputW;
maxPoolIdxData += outputD * outputH * outputW;
tgtGrad += inLength;
otGrad += outLength;
maxPoolIdxData += outLength;
}
}
}
......@@ -2367,11 +2345,10 @@ void CpuMatrix::avgPool3DForward(Matrix& input,
size_t paddingW) {
// The main loop
size_t num = input.getHeight();
size_t inDepth = imgSizeD;
size_t inHeight = imgSizeH;
size_t inWidth = imgSizeW;
CHECK(inDepth * inHeight * inWidth * channels == input.getWidth());
CHECK(outputD * outputH * outputW * channels * num == height_ * width_);
size_t inLength = imgSizeH * imgSizeW * imgSizeD;
size_t outLength = outputH * outputW * outputD;
CHECK(inLength * channels == input.getWidth());
CHECK(outLength * channels * num == height_ * width_);
real* tgtData = getData();
real* inData = input.getData();
......@@ -2381,39 +2358,36 @@ void CpuMatrix::avgPool3DForward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t pd = 0; pd < outputD; ++pd) {
int dstart = pd * strideD - paddingD;
int dend = std::min(dstart + sizeZ, imgSizeD);
dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int dstart = pd * strideD - paddingD;
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int dend = std::min(dstart + sizeZ, inDepth + paddingD);
int hend = std::min(hstart + sizeY, inHeight + paddingH);
int wend = std::min(wstart + sizeX, inWidth + paddingW);
int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
dstart = std::max(dstart, 0);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
dend = std::min(dend, static_cast<int>(inDepth));
hend = std::min(hend, static_cast<int>(inHeight));
wend = std::min(wend, static_cast<int>(inWidth));
CHECK(poolSize);
tgtData[(pd * outputH + ph) * outputW + pw] = 0; // clear
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
tgtData[(pd * outputH + ph) * outputW + pw] +=
inData[(d * inHeight + h) * inWidth + w];
inData[(d * imgSizeH + h) * imgSizeW + w];
}
}
}
int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
CHECK(poolSize);
tgtData[(pd * outputH + ph) * outputW + pw] /= poolSize;
}
}
}
// compute offset
inData += inDepth * inHeight * inWidth;
tgtData += outputD * outputH * outputW;
inData += inLength;
tgtData += outLength;
}
}
}
......@@ -2437,8 +2411,10 @@ void CpuMatrix::avgPool3DBackward(Matrix& input,
real scaleTargets,
real scaleOutput) {
size_t num = input.getHeight();
size_t channels = input.getWidth() / outputD / outputH / outputW;
CHECK(imgSizeD * imgSizeH * imgSizeW * channels == getWidth());
size_t inLength = imgSizeH * imgSizeW * imgSizeD;
size_t outLength = outputH * outputW * outputD;
size_t channels = input.getWidth() / outLength;
CHECK(inLength * channels == getWidth());
real* inData = input.getData();
real* outData = getData();
......@@ -2448,21 +2424,18 @@ void CpuMatrix::avgPool3DBackward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t pd = 0; pd < outputD; ++pd) {
int dstart = pd * strideD - paddingD;
int dend = std::min(dstart + sizeZ, imgSizeD);
dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
int hstart = ph * strideH - paddingH;
int hend = std::min(hstart + sizeY, imgSizeH);
hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
int dstart = pd * strideD - paddingD;
int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
int dend = std::min(dstart + sizeZ, imgSizeD + paddingD);
int hend = std::min(hstart + sizeY, imgSizeH + paddingH);
int wend = std::min(wstart + sizeX, imgSizeW + paddingW);
int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
dstart = std::max(dstart, 0);
hstart = std::max(hstart, 0);
int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
dend = std::min(dend, static_cast<int>(imgSizeD));
hend = std::min(hend, static_cast<int>(imgSizeH));
wend = std::min(wend, static_cast<int>(imgSizeW));
int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
CHECK(poolSize);
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
......@@ -2476,8 +2449,8 @@ void CpuMatrix::avgPool3DBackward(Matrix& input,
}
}
// offset
outData += imgSizeD * imgSizeH * imgSizeW;
inData += outputD * outputH * outputW;
outData += inLength;
inData += outLength;
}
}
}
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include "NEONFunctions.h"
#include <arm_neon.h>
namespace paddle {
namespace neon {
// b[i] = a[i] > 0.0f ? a[i] : 0.0f
void relu(const float* a, float* b, int len) {
int offset = len % 16;
float32x4_t ma0, ma1, ma2, ma3;
float32x4_t mb0, mb1, mb2, mb3;
float32x4_t zero = vdupq_n_f32(0.f);
for (int k = 0; k < len / 16; k++, a += 16, b += 16) {
ma0 = vld1q_f32(a);
ma1 = vld1q_f32(a + 4);
ma2 = vld1q_f32(a + 8);
ma3 = vld1q_f32(a + 12);
mb0 = vmaxq_f32(ma0, zero);
mb1 = vmaxq_f32(ma1, zero);
mb2 = vmaxq_f32(ma2, zero);
mb3 = vmaxq_f32(ma3, zero);
vst1q_f32(b, mb0);
vst1q_f32(b + 4, mb1);
vst1q_f32(b + 8, mb2);
vst1q_f32(b + 12, mb3);
}
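  // The loop above advanced a and b past the vectorized portion, so the
  // scalar tail below correctly starts at index 0.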
for (int i = 0; i < offset; i++) {
b[i] = a[i] > 0.0f ? a[i] : 0.0f;
}
}
} // namespace neon
} // namespace paddle
#endif
......@@ -14,44 +14,10 @@ limitations under the License. */
#pragma once
#include <vector>
#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
namespace paddle {
namespace neon {
/**
* @brief A subclass of ConvBaseLayer that is a superclass of both
* ExpandConvLayer and ExpandConvTransLayer
*/
class ExpandConvBaseLayer : public ConvBaseLayer {
protected:
/// The transpose of output, which is an auxiliary matrix.
MatrixPtr transOutValue_;
public:
explicit ExpandConvBaseLayer(const LayerConfig& config)
: ConvBaseLayer(config) {}
~ExpandConvBaseLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
size_t getOutputSize();
/**
* Add shared bias.
*/
void addSharedBias();
/**
* Add unshared bias.
*/
void addUnsharedBias();
void bpropSharedBias(MatrixPtr biases, MatrixPtr v);
void bpropBiases(MatrixPtr v);
};
void relu(const float* a, float* b, int len);
} // namespace neon
} // namespace paddle
......@@ -825,9 +825,8 @@ void testMaxPoolFwdBwd(int numSamples,
int strideW,
int padH,
int padW) {
int outH = 0, outW = 0;
outH = (imgSizeH - ksizeH + 2 * padH + strideH - 1) / strideH + 1;
outW = (imgSizeW - ksizeW + 2 * padW + strideW - 1) / strideW + 1;
int outH = outputSize(imgSizeH, ksizeH, padH, strideH, true);
int outW = outputSize(imgSizeW, ksizeW, padW, strideW, true);
int inWidth = imgSizeH * imgSizeW * channels;
MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false);
......@@ -927,9 +926,8 @@ void testAvgPoolFwdBwd(int numSamples,
int strideW,
int padH,
int padW) {
int outH = 0, outW = 0;
outH = (imgSizeH - ksizeH + 2 * padH + strideH - 1) / strideH + 1;
outW = (imgSizeW - ksizeW + 2 * padW + strideW - 1) / strideW + 1;
int outH = outputSize(imgSizeH, ksizeH, padH, strideH, true);
int outW = outputSize(imgSizeW, ksizeW, padW, strideW, true);
int inWidth = imgSizeH * imgSizeW * channels;
MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false);
......
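// A minimal sketch (hypothetical helpers, not Paddle's outputSize()) of the
// two rounding conventions for pooling output sizes; the inline formula
// removed above is the ceil variant.
#include <cassert>

static int poolOutFloor(int in, int k, int pad, int stride) {
  return (in - k + 2 * pad) / stride + 1;               // floor rounding
}
static int poolOutCeil(int in, int k, int pad, int stride) {
  return (in - k + 2 * pad + stride - 1) / stride + 1;  // ceil rounding
}

int main() {
  assert(poolOutFloor(56, 3, 1, 2) == 28);
  assert(poolOutCeil(56, 3, 1, 2) == 29);
  return 0;
}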
......@@ -23,10 +23,15 @@ class AccuracyOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Inference"),
"Input of Inference must be initialized.");
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("Inference"),
"Input(Inference) of AccuracyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
"Input of Inference must be initialized.");
"Input(Label) of AccuracyOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Accuracy"),
"Output(Accuracy) of AccuracyOp should not be null.");
auto *inference = ctx.Input<framework::Tensor>("Inference");
auto *label = ctx.Input<framework::Tensor>("Label");
......
......@@ -12,26 +12,38 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/operators/accuracy_op.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
__global__ void AccuracySingleKernel(const int N, const int D, const int top_k,
const int* Xdata, const int* labelData,
float* accuracy) {
int correct = 0;
for (int row = 0; row < N; row++) {
const int label = labelData[row];
for (int col = 0; col < D; col++) {
const int pred = Xdata[row * D + col];
if (pred == label) {
++correct;
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata,
const int* labeldata, float* accuracy) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
*accuracy = static_cast<float>(correct) / static_cast<float>(N);
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
if (threadIdx.x == 0) {
*accuracy = static_cast<float>(result) / static_cast<float>(N);
}
}
template <typename T>
......@@ -57,8 +69,8 @@ class AccuracyOpCUDAKernel : public framework::OpKernel {
return;
}
AccuracySingleKernel<<<1, 1>>>(num_samples, infer_width, 1, inference_data,
label_data, accuracy_data);
AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS>>>(
num_samples, infer_width, inference_data, label_data, accuracy_data);
}
};
......
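// A minimal CPU reference sketch of what AccuracyCudaKernel above computes:
// the fraction of samples whose D inference candidates contain the label.
// (accuracyRef is a hypothetical helper for illustration only.)
static float accuracyRef(const int* inference, const int* label, int n, int d) {
  int correct = 0;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < d; ++j) {
      if (inference[i * d + j] == label[i]) {
        ++correct;
        break;
      }
    }
  }
  return static_cast<float>(correct) / static_cast<float>(n);
}

int main() {
  const int inference[4] = {3, 7, 1, 3};  // N=2 samples, D=2 candidates each
  const int label[2] = {7, 2};
  // sample 0 hits (7 is a candidate), sample 1 misses -> accuracy 0.5
  return accuracyRef(inference, label, 2, 2) == 0.5f ? 0 : 1;
}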
......@@ -23,6 +23,13 @@ class AddOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of AddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of AddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of AddOp should not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(),
"Two input of Add Op's dimension must be same.");
......
......@@ -25,6 +25,9 @@ class ConcatOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of ConcatOp should not be null.");
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::LoDTensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
......
......@@ -33,7 +33,8 @@ using DDim = framework::DDim;
void CondOp::CreateScope(const Scope& scope) const {
auto sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE(sub_scopes_var != nullptr, "");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto sub_scopes = sub_scopes_var->GetMutable<std::vector<Scope*>>();
auto& sub_scope = scope.NewScope();
sub_scopes->push_back(&sub_scope);
......@@ -41,7 +42,8 @@ void CondOp::CreateScope(const Scope& scope) const {
void CondOp::CreateIndexTensor(const Scope& scope) const {
auto index_tensors_var = scope.FindVar("IndexTensors");
PADDLE_ENFORCE(index_tensors_var != nullptr, "");
PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
"Output(IndexTensors) of CondOp should not be null.");
auto& index_tensors =
*index_tensors_var->GetMutable<std::vector<LoDTensor>>();
index_tensors.push_back(LoDTensor());
......@@ -49,7 +51,8 @@ void CondOp::CreateIndexTensor(const Scope& scope) const {
void CondOp::InferShape(const Scope& scope) const {
auto sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var);
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto& sub_scopes = *sub_scopes_var->GetMutable<std::vector<Scope*>>();
for (int i = 0; i < 2; ++i) {
......@@ -63,7 +66,8 @@ void CondOp::InferShape(const Scope& scope) const {
// branch
CreateIndexTensor(scope);
PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs can't be empty");
PADDLE_ENFORCE(!Inputs("Xs").empty(),
"Inputs(Xs) of CondOp can't be empty.");
for (auto& input : Inputs("Xs")) {
// Create a new tensor in sub-scope for input-type tensor
Variable* v = sub_scopes[i]->NewVar(input);
......@@ -108,13 +112,18 @@ void CondOp::InferShape(const Scope& scope) const {
void CondOp::Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const {
auto* sub_scopes_var = scope.FindVar("SubScopes");
PADDLE_ENFORCE_NOT_NULL(sub_scopes_var,
"Output(SubScopes) of CondOp should not be null.");
auto sub_scopes = sub_scopes_var->Get<std::vector<Scope*>>();
auto* index_tensors_var = scope.FindVar("IndexTensors");
PADDLE_ENFORCE_NOT_NULL(index_tensors_var,
"Output(IndexTensors) of CondOp should not be null.");
auto index_tensors = index_tensors_var->Get<std::vector<LoDTensor>>();
std::string cond_name = Input("Cond");
Variable* cond_var = scope.FindVar(cond_name);
PADDLE_ENFORCE_NOT_NULL(cond_var);
PADDLE_ENFORCE_NOT_NULL(cond_var,
"Input(Cond) of CondOp should not be null.");
const LoDTensor* cond = cond_var->GetMutable<LoDTensor>();
// Step 1: get the true/false index at runtime
......@@ -171,6 +180,8 @@ void CondOp::Run(const Scope& scope,
}
// Step 4: merge output results
PADDLE_ENFORCE(!Outputs("Outs").empty(),
"Outputs(Outs) of CondOp can't be empty.");
for (int i = 0; i < 2; ++i) {
// i= 0/i for True and False branches respectively
for (auto& output : Outputs("Outs")) {
......
......@@ -26,8 +26,16 @@ class CosSimOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// notnull check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("XNorm"),
"Output(XNorm) of CosSimOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("YNorm"),
"Output(YNorm) of CosSimOp should not be null.");
// shape check
auto x_dims = ctx.Input<Tensor>("X")->dims();
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
namespace paddle {
namespace operators {
using framework::LoDTensor;
class CrossEntropyOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
"Input(Label) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), "Output(Y) must not be null.");
auto x = ctx.Input<Tensor>("X");
auto label = ctx.Input<Tensor>("Label");
PADDLE_ENFORCE_EQ(x->dims().size(), 2, "Input(X)'s rank must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 2,
"Input(Label)'s rank must be 2.");
// TODO(xinghai-sun): remove this check after switching to bool
PADDLE_ENFORCE(ctx.Attr<int>("soft_label") == 0 ||
ctx.Attr<int>("soft_label") == 1);
PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0],
"The 1st dimension of Input(X) and Input(Label) must "
"be equal.");
if (ctx.Attr<int>("soft_label") == 1) {
PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1],
"If Attr(soft_label) == 1, The 2nd dimension of "
"Input(X) and Input(Label) must be equal.");
} else {
PADDLE_ENFORCE_EQ(label->dims()[1], 1,
"If Attr(soft_label) == 0, The 2nd dimension of "
"Input(Label) must be 1.");
}
ctx.Output<LoDTensor>("Y")->Resize({x->dims()[0], 1});
}
};
class CrossEntropyGradientOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
"Input(Label) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
"Input(Y@GRAD) must not be null.");
auto x = ctx.Input<Tensor>("X");
auto label = ctx.Input<Tensor>("Label");
auto dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE_EQ(x->dims().size(), 2, "Input(X)'s rank must be 2.");
PADDLE_ENFORCE_EQ(dy->dims().size(), 2, "Input(Y@Grad)'s rank must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 2,
"Input(Label)'s rank must be 2.");
// TODO(xinghai-sun): remove this check after switching to bool
PADDLE_ENFORCE(ctx.Attr<int>("soft_label") == 0 ||
ctx.Attr<int>("soft_label") == 1);
PADDLE_ENFORCE_EQ(x->dims()[0], label->dims()[0],
"The 1st dimension of Input(X) and Input(Label) must "
"be equal.");
PADDLE_ENFORCE_EQ(x->dims()[0], dy->dims()[0],
"The 1st dimension of Input(X) and Input(Y@Grad) must "
"be equal.");
PADDLE_ENFORCE_EQ(dy->dims()[1], 1,
"The 2nd dimension of Input(Y@Grad) must be 1.");
if (ctx.Attr<int>("soft_label") == 1) {
PADDLE_ENFORCE_EQ(x->dims()[1], label->dims()[1],
"If Attr(soft_label) == 1, The 2nd dimension of "
"Input(X) and Input(Label) must be equal.");
} else {
PADDLE_ENFORCE_EQ(label->dims()[1], 1,
"If Attr(soft_label) == 0, The 2nd dimension of "
"Input(Label) must be 1.");
}
auto dx = ctx.Output<LoDTensor>(framework::GradVarName("X"));
dx->Resize(x->dims());
}
};
class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
public:
CrossEntropyOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of CrossEntropyOp");
AddInput("Label", "The second input of CrossEntropyOp");
AddOutput("Y", "The output of CrossEntropyOp");
AddAttr<int>("soft_label", "Is soft label. Default zero.").SetDefault(0);
AddComment(R"DOC(
CrossEntropy Operator.
It supports both standard cross-entropy and soft-label cross-entropy loss
computation.
1) One-hot cross-entropy:
soft_label = 0, Label[i, 0] indicates the class index for sample i:
Y[i] = -log(X[i, Label[i]])
2) Soft-label cross-entropy:
soft_label = 1, Label[i, j] indicates the soft label of class j
for sample i:
Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}
Please make sure that in this case the summation of each row of Label
equals one.
3) One-hot cross-entropy with vectorized Input(Label):
As a special case of 2), when each row of Input(Label) has only one
non-zero element (equals 1), soft-label cross-entropy degenerates to a
one-hot cross-entropy with one-hot label representation.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker,
cross_entropy_grad, ops::CrossEntropyGradientOp);
REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel<float>);
REGISTER_OP_CPU_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpKernel<float>);
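// A minimal numeric sketch of the two loss variants documented above, for a
// single sample with class_num = 3 (values are illustrative only).
#include <cassert>
#include <cmath>

int main() {
  const double x[3] = {0.2, 0.5, 0.3};  // predicted probabilities
  // 1) one-hot, label = 1:  Y = -log(X[label]) = -log(0.5)
  double y_hard = -std::log(x[1]);
  // 2) soft label:  Y = -sum_j Label[j] * log(X[j])
  const double soft[3] = {0.1, 0.8, 0.1};
  double y_soft = 0.0;
  for (int j = 0; j < 3; ++j) y_soft -= soft[j] * std::log(x[j]);
  assert(std::fabs(y_hard - 0.6931) < 1e-3);
  assert(y_soft > y_hard);  // mass on non-argmax classes raises the loss
  return 0;
}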
......@@ -13,14 +13,13 @@
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/utils.h"
#include "paddle/operators/cross_entropy_op.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__global__ void CrossEntropyKernel(T* Y, const T* X, const int* label,
const int N, const int D) {
......@@ -29,7 +28,20 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int* label,
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
PADDLE_ASSERT(label[i] >= 0 && label[i] < D);
Y[i] = -math::tolerable_value(log(X[i * D + label[i]]));
Y[i] = -tolerable_value(log(X[i * D + label[i]]));
}
}
template <typename T>
__global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label,
const int N, const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
T sum = static_cast<T>(0);
for (int j = 0; j < D; j++) {
sum += label[i * D + j] * tolerable_value(log(X[i * D + j]));
}
Y[i] = -sum;
}
}
......@@ -56,58 +68,84 @@ __global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
}
template <typename T>
class OnehotCrossEntropyOpCUDAKernel : public framework::OpKernel {
__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const T* label, const int N,
const int D) {
// TODO(qingqing): optimize this kernel
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
for (int j = 0; j < D; ++j) {
int idx = i * D + j;
dX[idx] = -label[idx] * dY[i] / X[idx];
}
}
}
template <typename T>
class CrossEntropyOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
"It must use GPUPlace.");
auto X = ctx.Input<Tensor>("X");
const T* Xdata = X->data<T>();
const int* label_data = ctx.Input<Tensor>("label")->data<int>();
auto Y = ctx.Output<Tensor>("Y");
Y->mutable_data<T>(ctx.GetPlace());
T* Ydata = Y->data<T>();
auto x = ctx.Input<Tensor>("X");
auto y = ctx.Output<Tensor>("Y");
auto label = ctx.Input<Tensor>("Label");
int N = X->dims()[0];
int D = X->dims()[1];
int block = 512;
int grid = (N + block - 1) / block;
auto* x_data = x->data<T>();
y->mutable_data<T>(ctx.GetPlace());
auto* y_data = y->data<T>();
int n = x->dims()[0];
int d = x->dims()[1];
int block = 512;
int grid = (n + block - 1) / block;
// TODO(qingqing) launch kernel on specified stream
// base on ExecutionContext.
CrossEntropyKernel<T><<<grid, block>>>(Ydata, Xdata, label_data, N, D);
if (ctx.Attr<int>("soft_label") == 1) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
SoftCrossEntropyKernel<T><<<grid, block>>>(y_data, x_data, label_data, n,
d);
} else {
auto* label_data = ctx.Input<Tensor>("Label")->data<int>();
CrossEntropyKernel<T><<<grid, block>>>(y_data, x_data, label_data, n, d);
}
}
};
template <typename T>
class OnehotCrossEntropyGradientOpCUDAKernel : public framework::OpKernel {
class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto X = ctx.Input<Tensor>("X");
auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dY = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto label = ctx.Input<Tensor>("label");
auto x = ctx.Input<Tensor>("X");
auto dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto label = ctx.Input<Tensor>("Label");
auto* dXdata = dX->template mutable_data<T>(ctx.GetPlace());
auto* dYdata = dY->template data<T>();
auto* Xdata = X->template data<T>();
auto* label_data = label->data<int>();
auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
auto* dy_data = dy->data<T>();
auto* x_data = x->data<T>();
int N = X->dims()[0];
int D = X->dims()[1];
int n = x->dims()[0];
int d = x->dims()[1];
int block = 512;
int grid = (N * D + block - 1) / block;
zero<T><<<grid, block>>>(dXdata, N * D);
grid = (N + block - 1) / block;
int grid = (n * d + block - 1) / block;
zero<T><<<grid, block>>>(dx_data, n * d);
grid = (n + block - 1) / block;
// TODO(qingqing): launch kernel on specified stream
// base on ExecutionContext.
CrossEntropyGradientKernel<T><<<grid, block>>>(dXdata, dYdata, Xdata,
label_data, N, D);
if (ctx.Attr<int>("soft_label") == 1) {
auto* label_data = label->data<T>();
SoftCrossEntropyGradientKernel<T><<<grid, block>>>(
dx_data, dy_data, x_data, label_data, n, d);
} else {
auto* label_data = label->data<int>();
CrossEntropyGradientKernel<T><<<grid, block>>>(dx_data, dy_data, x_data,
label_data, n, d);
}
}
};
......@@ -115,7 +153,6 @@ class OnehotCrossEntropyGradientOpCUDAKernel : public framework::OpKernel {
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(onehot_cross_entropy,
ops::OnehotCrossEntropyOpCUDAKernel<float>);
REGISTER_OP_GPU_KERNEL(onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOpCUDAKernel<float>);
REGISTER_OP_GPU_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel<float>);
REGISTER_OP_GPU_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpCUDAKernel<float>);
......@@ -14,7 +14,7 @@ limitations under the License. */
#pragma once
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/utils.h"
#include "paddle/platform/hostdevice.h"
namespace paddle {
namespace operators {
......@@ -22,56 +22,93 @@ namespace operators {
using Tensor = framework::Tensor;
template <typename T>
class OnehotCrossEntropyOpKernel : public framework::OpKernel {
HOSTDEVICE T tolerable_value(const T x) {
PADDLE_ASSERT(std::is_floating_point<T>::value);
const T kApproInf = 1e20;
if (x == INFINITY) {
return kApproInf;
}
if (x == -INFINITY) {
return -kApproInf;
}
return x;
}
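// Example: std::log(0.0f) yields -INFINITY, which tolerable_value clamps to
// -1e20, keeping the loss finite when a predicted probability underflows.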
template <typename T>
class CrossEntropyOpKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
auto X = ctx.Input<Tensor>("X");
const T* Xdata = X->data<T>();
const int* label_data = ctx.Input<Tensor>("label")->data<int>();
auto Y = ctx.Output<Tensor>("Y");
Y->mutable_data<T>(ctx.GetPlace());
T* Ydata = Y->data<T>();
const int batch_size = X->dims()[0];
const int class_num = X->dims()[1];
for (int i = 0; i < batch_size; ++i) {
int index = i * class_num + label_data[i];
Ydata[i] = -math::tolerable_value(std::log(Xdata[index]));
auto x = ctx.Input<Tensor>("X");
auto y = ctx.Output<Tensor>("Y");
auto* x_data = x->data<T>();
y->mutable_data<T>(ctx.GetPlace());
auto* y_data = y->data<T>();
int batch_size = x->dims()[0];
int class_num = x->dims()[1];
if (ctx.Attr<int>("soft_label") == 1) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
int index = 0;
for (int i = 0; i < batch_size; ++i) {
  T sum = static_cast<T>(0);
  for (int j = 0; j < class_num; ++j) {
    sum += label_data[index] * tolerable_value(std::log(x_data[index]));
    index++;
  }
  // write the accumulated loss once per sample instead of once per class
  y_data[i] = -sum;
}
} else {
auto* label_data = ctx.Input<Tensor>("Label")->data<int>();
for (int i = 0; i < batch_size; ++i) {
int index = i * class_num + label_data[i];
y_data[i] = -tolerable_value(std::log(x_data[index]));
}
}
}
};
template <typename T>
class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel {
class CrossEntropyGradientOpKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
auto X = ctx.Input<Tensor>("X");
auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dY = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto label = ctx.Input<Tensor>("label");
auto x = ctx.Input<Tensor>("X");
auto dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto label = ctx.Input<Tensor>("Label");
auto* dXdata = dX->template mutable_data<T>(ctx.GetPlace());
auto* dYdata = dY->template data<T>();
auto* Xdata = X->template data<T>();
auto* label_data = label->data<int>();
auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
auto* dy_data = dy->data<T>();
auto* x_data = x->data<T>();
const int batch_size = X->dims()[0];
const int class_num = X->dims()[1];
int batch_size = x->dims()[0];
int class_num = x->dims()[1];
// TODO(qingqing): make zero setting an common function.
memset(dXdata, 0, sizeof(T) * batch_size * class_num);
for (int i = 0; i < batch_size; ++i) {
int index = i * class_num + label_data[i];
dXdata[index] = -math::tolerable_value(dYdata[i] / Xdata[index]);
if (ctx.Attr<int>("soft_label") == 1) {
auto* label_data = ctx.Input<Tensor>("Label")->data<T>();
int index = 0;
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < class_num; ++j) {
dx_data[index] = -label_data[index] * dy_data[i] / x_data[index];
index++;
}
}
} else {
auto* label_data = label->data<int>();
memset(dx_data, 0, sizeof(T) * batch_size * class_num);
for (int i = 0; i < batch_size; ++i) {
PADDLE_ASSERT(label_data[i] >= 0 && label_data[i] < class_num);
int index = i * class_num + label_data[i];
dx_data[index] = -dy_data[i] / x_data[index];
}
}
}
};
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/dropout_op.h"
namespace paddle {
namespace operators {
using framework::Tensor;
using framework::LoDTensor;
class DropoutOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_GE(ctx.Attr<float>("dropout_prob"), 0);
PADDLE_ENFORCE_LE(ctx.Attr<float>("dropout_prob"), 1);
// TODO(xinghai-sun): remove this check after switching to bool
PADDLE_ENFORCE(ctx.Attr<int>("is_training") == 0 ||
ctx.Attr<int>("is_training") == 1);
auto dims = ctx.Input<Tensor>("X")->dims();
ctx.Output<LoDTensor>("Out")->Resize(dims);
if (ctx.Attr<int>("is_training") == 1) {
ctx.Output<LoDTensor>("Mask")->Resize(dims);
}
}
};
template <typename AttrType>
class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
public:
DropoutOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<AttrType>("dropout_prob", "Probability of setting units to zero.")
.SetDefault(.5f);
// TODO(xinghai-sun): use bool for is_training after bool is supported.
AddAttr<int>("is_training", "Whether in training phase.").SetDefault(1);
AddAttr<int>("seed", "Dropout random seed.").SetDefault(0);
AddInput("X", "The input of dropout op.");
AddOutput("Out", "The output of dropout op.");
AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate();
AddComment(R"DOC(
Dropout Operator.
"Dropout" refers to randomly dropping out units in a nerual network. It is a
regularization technique for reducing overfitting by preventing neuron
co-adaption during training. The dropout operator randomly set (according to
the given dropout probability) the outputs of some units to zero, while others
being set to their inputs.
)DOC");
}
};
template <typename AttrType>
class DropoutOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.Attr<int>("is_training"), 1,
"GradOp is only callable when is_training is true");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Mask"), "Mask must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) must not be null.");
PADDLE_ENFORCE_GE(ctx.Attr<AttrType>("dropout_prob"), 0);
PADDLE_ENFORCE_LE(ctx.Attr<AttrType>("dropout_prob"), 1);
// TODO(xinghai-sun): remove this check after switching to bool
PADDLE_ENFORCE(ctx.Attr<int>("is_training") == 0 ||
ctx.Attr<int>("is_training") == 1);
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
PADDLE_ENFORCE_EQ(x_dims, out_dims,
"Dimensions of Input(X) and Out@Grad must be the same.");
auto mask_dims = ctx.Input<Tensor>("Mask")->dims();
PADDLE_ENFORCE_EQ(x_dims, mask_dims,
"Dimensions of Input(X) and Mask must be the same.");
auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
x_grad->Resize(x_dims);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(dropout, ops::DropoutOp, ops::DropoutOpMaker<float>, dropout_grad,
ops::DropoutOpGrad<float>);
REGISTER_OP_CPU_KERNEL(
dropout, ops::CPUDropoutKernel<paddle::platform::CPUPlace, float, float>);
REGISTER_OP_CPU_KERNEL(
dropout_grad, ops::DropoutGradKernel<paddle::platform::CPUPlace, float>);
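// A minimal sketch of the expectation argument behind dropout's inference-time
// scaling by the keep probability (1 - dropout_prob), as used by the kernels
// below: a unit kept with probability (1 - p) at full strength has expected
// output (1 - p) * x during training.
#include <cassert>
#include <cmath>

int main() {
  float x = 2.0f, p = 0.5f;
  // E[mask * x] = P(keep) * x + P(drop) * 0 = (1 - p) * x
  float expected_train = (1.0f - p) * x + p * 0.0f;
  float inference = (1.0f - p) * x;  // deterministic scaling at test time
  assert(std::fabs(expected_train - inference) < 1e-6f);
  return 0;
}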
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include "paddle/operators/dropout_op.h"
namespace paddle {
namespace operators {
template <typename T, typename AttrType>
struct MaskGenerator {
AttrType dropout_prob;
int seed;
__host__ __device__ MaskGenerator(AttrType dropout_prob, int seed)
: dropout_prob(dropout_prob), seed(seed) {}
__host__ __device__ T operator()(const unsigned int n) const {
thrust::minstd_rand rng;
rng.seed(seed);
thrust::uniform_real_distribution<AttrType> dist(0, 1);
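    // Seeding identically and then discarding n draws gives element n its own
    // deterministic sample, with no state shared between GPU threads.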
rng.discard(n);
if (dist(rng) < dropout_prob) {
return static_cast<T>(0);
} else {
return static_cast<T>(1);
}
}
};
// It seems that Eigen::Tensor::setRandom will SEGFAULT on GPU.
// Use std::random (on CPU) and thrust::random (Thrust, a parallel algorithms
// library shipped with CUDA) to implement the uniform random mask instead.
template <typename Place, typename T, typename AttrType>
class GPUDropoutKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* y = context.Output<Tensor>("Out");
y->mutable_data<T>(context.GetPlace());
AttrType dropout_prob = context.Attr<AttrType>("dropout_prob");
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
auto place = context.GetEigenDevice<Place>();
if (context.Attr<int>("is_training") == 1) {
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<T>(context.GetPlace());
int size = framework::product(mask->dims());
int seed = context.Attr<int>("seed");
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
thrust::transform(index_sequence_begin, index_sequence_begin + size,
thrust::device_ptr<T>(mask_data),
MaskGenerator<T, AttrType>(dropout_prob, seed));
auto M = EigenMatrix<T>::Reshape(*mask, 1);
Y.device(place) = X * M;
} else {
// Scale by the keep probability (1 - dropout_prob) at inference time so
// the expected output matches training, where E[mask] = 1 - dropout_prob.
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
dropout, ops::GPUDropoutKernel<paddle::platform::GPUPlace, float, float>);
REGISTER_OP_GPU_KERNEL(
dropout_grad, ops::DropoutGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <random>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename Place, typename T, typename AttrType>
class CPUDropoutKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* y = context.Output<Tensor>("Out");
const auto* x_data = x->data<T>();
auto* y_data = y->mutable_data<T>(context.GetPlace());
AttrType dropout_prob = context.Attr<AttrType>("dropout_prob");
if (context.Attr<int>("is_training") == 1) {
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<T>(context.GetPlace());
int seed = context.Attr<int>("seed");
std::minstd_rand engine;
engine.seed(seed);
std::uniform_real_distribution<AttrType> dist(0, 1);
size_t size = framework::product(mask->dims());
for (size_t i = 0; i < size; ++i) {
if (dist(engine) < dropout_prob) {
mask_data[i] = 0;
y_data[i] = 0;
} else {
mask_data[i] = 1;
y_data[i] = x_data[i];
}
}
} else {
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
auto place = context.GetEigenDevice<Place>();
// Scale by the keep probability (1 - dropout_prob) at inference time so
// the expected output matches training, where E[mask] = 1 - dropout_prob.
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
};
template <typename Place, typename T>
class DropoutGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(context.Attr<int>("is_training"), 1,
"GradOp is only callable when is_training is true");
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
auto* mask = context.Input<Tensor>("Mask");
grad_x->mutable_data<T>(context.GetPlace());
auto M = EigenMatrix<T>::Reshape(*mask, 1);
auto dX = EigenMatrix<T>::Reshape(*grad_x, 1);
auto dY = EigenMatrix<T>::Reshape(*grad_y, 1);
auto place = context.GetEigenDevice<Place>();
dX.device(place) = dY * M;
}
};
} // namespace operators
} // namespace paddle
......@@ -25,8 +25,14 @@ class ElementWiseMulOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of ElementWiseMulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of ElementWiseMulOp should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto y_dim = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
class FCOp : public NetOp {
public:
FCOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
PADDLE_ENFORCE(!Inputs("X").empty(),
"Inputs(X) of FCOp should not be null.");
PADDLE_ENFORCE(!Inputs("W").empty(),
"Inputs(W) of FCOp should not be null.");
PADDLE_ENFORCE(!Outputs("MulOut").empty(),
"Outputs(MulOut) of FCOp should not be null.");
PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
"Output(Out) of FCOp should not be null.");
auto x = Inputs("X");
auto w = Inputs("W");
auto mul_out = Outputs("MulOut");
PADDLE_ENFORCE_EQ(
x.size(), w.size(),
"The size of inputs X(%d) should be the same as that of weights W(%d).",
x.size(), w.size());
PADDLE_ENFORCE_EQ(mul_out.size(), x.size(),
"The size of intermediate mul_out(%d) should be the same "
"as that of inputs X(%d).",
mul_out.size(), x.size());
size_t n = x.size();
PADDLE_ENFORCE_GE(n, static_cast<size_t>(1),
"The size of inputs X(%d) should be no less than 1.", n);
auto x_num_col_dims = Attr<std::vector<int>>("xNumColDims");
// Either set all values or none of them (in which case the default is used)
if (!x_num_col_dims.empty()) {
PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n,
"The size of attribute xNumColDims(%d) should be the "
"same as that of inputs X(%d).",
x_num_col_dims.size(), n);
} else {
x_num_col_dims.resize(n);
for (size_t i = 0; i < n; i++) {
x_num_col_dims[i] = 1;
}
}
// mul_out[i] = X[i] * W[i]
for (size_t i = 0; i < n; i++) {
framework::AttributeMap mul_attr;
mul_attr["x_num_col_dims"] = static_cast<int>(x_num_col_dims[i]);
mul_attr["y_num_col_dims"] = static_cast<int>(1);
AppendOp(
framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}},
{{"Out", {mul_out[i]}}}, mul_attr));
}
// sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1]
auto sum_out = mul_out[0];
if (n > 1) {
PADDLE_ENFORCE_NE(Output("SumOut"), framework::kEmptyVarName,
"Output(SumOut) of FCOp should not be null when the "
"size of Inputs(X) > 1.");
sum_out = Output("SumOut");
AppendOp(framework::OpRegistry::CreateOp("sum", {{"X", {mul_out}}},
{{"Out", {sum_out}}}, {}));
} else {
if (Output("SumOut") != framework::kEmptyVarName) {
this->Rename(Output("SumOut"), framework::kEmptyVarName);
}
}
// add_out = sum_out + b
auto b = Input("B");
auto add_out = sum_out;
if (b != framework::kEmptyVarName) {
PADDLE_ENFORCE_NE(
Output("AddOut"), framework::kEmptyVarName,
"Output(AddOut) of FCOp should not be null when Input(B) is set.");
add_out = Output("AddOut");
AppendOp(framework::OpRegistry::CreateOp(
"rowwise_add", {{"X", {sum_out}}, {"b", {Input("B")}}},
{{"Out", {add_out}}}, {}));
} else {
if (Output("AddOut") != framework::kEmptyVarName) {
this->Rename(Output("AddOut"), framework::kEmptyVarName);
}
}
auto activation = Attr<std::string>("activation");
AppendOp(framework::OpRegistry::CreateOp(activation, {{"X", {add_out}}},
{{"Y", {Output("Out")}}}, {}));
CompleteAddOp(false);
}
};
class FCOpMaker : public framework::OpProtoAndCheckerMaker {
public:
FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X",
"(A vector of Tensors) each input Tensor can be of arbitrary "
"dimension, and will be reshaped to a 2-D matrix of size "
"(minibatch, number_of_input_features) according to attribute "
"xNumColDims.")
.AsDuplicable();
AddInput("W",
"(A vector of Tensors) the weights of FC operator, a "
"vector of 2-D matrix of size "
"(number_of_input_features, number_of_neurons).")
.AsDuplicable();
AddInput("B",
"(Tensor) the bias of FC operator, a 1-D vector of size "
"number_of_neurons.");
AddOutput("Out",
"(Tensor) the activated output matrix of FC operator, a 2-D "
"matrix of size (minibatch, number_of_neurons).");
AddOutput("MulOut",
"(A vector of Tensors) the intermediate outputs of FC operator, "
"each Tensor saving the product of X_i * W_i.")
.AsIntermediate()
.AsDuplicable();
AddOutput(
"SumOut",
"(Tensor) the intermediate output of FC operator, "
"saving the sum of the products of X and W, that is sum{X_i * W_i}.")
.AsIntermediate();
AddOutput("AddOut",
"(Tensor) the non-actived output of FC operator, "
"saving sum{X_i * W_i} + B.")
.AsIntermediate();
AddAttr<std::string>(
"activation",
"(string, default identity) the activation type of FC operator.")
.SetDefault("identity")
.InEnum({"identity", "sigmoid", "softmax"});
AddAttr<std::vector<int>>(
"xNumColDims",
"(std::vector<int>) The inputs Tensors of FC operator can be of "
"more than 2 dimensions. In that case, each input Tensor `X_i` will be "
"reshaped to a 2-D matrix. The matrix's first dimension "
"(the length of column) will be the product of `X_i`'s last "
"`xNumColDims_i` dimensions, that is "
"`X_i.dims[0] x ... x X_i.dims[xNumColDims_i - 1]`. "
"The matrix's second dimension (the length of row) will be the product "
"of `X_i`'s first `rank - xNumColDims_i` dimensions, that is "
"`X_i.dims[xNumColDims_i] x ... x X_i.dims[rank - 1]`)")
.SetDefault(std::vector<int>{});
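// For example (shapes assumed for illustration): an input X_i with dims
// [2, 3, 4] and xNumColDims_i = 1 is reshaped to a 2 x 12 matrix, while
// xNumColDims_i = 2 yields a 6 x 4 matrix.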
AddComment(R"DOC(
Fully Connected Operator, also known as a Fully Connected Layer or an Inner
Product Layer in Convolutional Neural Networks. Neurons in a fully connected
layer have full connections to all activations in the previous layer.
It computes the products of the inputs with a set of learned weights via
matrix multiplications, optionally followed by a bias offset.
Equation:
Out = Act(sum_n{X_i * W_i} + B)
where X_i is a Tensor that will be reshaped to a 2-D matrix of size (M x K);
usually M is the minibatch size and K is the number of input features.
W_i is a 2-D matrix of size (K x N), where N is the number of neurons
in the fully connected layer. B is a 1-D vector of size N.
Thus, the output Out is a 2-D matrix of size (M x N).
The activation type can be set to `identity` (default), `sigmoid` or `softmax`.
)DOC");
}
};
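// Shape walk-through (a sketch with assumed sizes): given a single input
// X_0 of size (32, 256), a weight W_0 of size (256, 10) and a bias B of size
// 10, MulOut[0] = X_0 * W_0 has size (32, 10); with only one input, SumOut is
// dropped and AddOut = MulOut[0] + B via rowwise_add, so Out = Act(AddOut)
// is a (32, 10) matrix.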
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(fc, ops::FCOp, ops::FCOpMaker);
......@@ -23,6 +23,13 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("Src"),
"Input(Src) of FillZerosLikeOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Dst"),
"Output(Dst) of FillZerosLikeOp should not be null.");
ctx.Output<framework::LoDTensor>("Dst")->Resize(
ctx.Input<framework::Tensor>("Src")->dims());
}
......
......@@ -24,6 +24,13 @@ class GatherOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of GatherOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
"Input(Index) of GatherOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of GatherOp should not be null.");
int batch_size = ctx.Input<Tensor>("Index")->dims()[0];
PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >= 0.");
framework::DDim output_dims(ctx.Input<Tensor>("X")->dims());
......
......@@ -43,8 +43,12 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext& context) const override {
auto* tensor = context.Output<framework::LoDTensor>("Out");
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of GaussianRandomOp should not be null.");
auto* tensor = ctx.Output<framework::LoDTensor>("Out");
auto dims = Attr<std::vector<int>>("dims");
std::vector<int64_t> temp;
temp.reserve(dims.size());
......
......@@ -27,7 +27,7 @@ class IdentityOpMaker : public framework::OpProtoAndCheckerMaker {
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of identity operator.");
AddOutput("Out", "The output tensor of identity operator.");
AddOutput("Y", "The output tensor of identity operator.");
AddComment(R"DOC(
The identity operator is an alias of the scale operator
with the attribute scale fixed to 1.0.
......@@ -42,9 +42,15 @@ class IdentityOp : public NetOp {
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName,
"Input(X) of IdentityOp should not be null.");
PADDLE_ENFORCE_NE(Output("Y"), framework::kEmptyVarName,
"Output(Y) of IdentityOp should not be null.");
AppendOp(framework::OpRegistry::CreateOp(
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Y")}}},
{{"scale", static_cast<AttrType>(1)}}));
CompleteAddOp(false);
}
};
......
......@@ -22,10 +22,17 @@ class LookupTableOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &context) const override {
auto table_t = context.Input<Tensor>("W");
auto ids_t = context.Input<Tensor>("Ids");
auto output_t = context.Output<framework::LoDTensor>("Out");
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("W"),
"Input(W) of LookupTableOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ids"),
"Input(Ids) of LookupTableOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of LookupTableOp should not be null.");
auto table_t = ctx.Input<Tensor>("W");
auto ids_t = ctx.Input<Tensor>("Ids");
auto output_t = ctx.Output<framework::LoDTensor>("Out");
output_t->Resize({ids_t->dims()[0], table_t->dims()[1]});
}
......
......@@ -19,12 +19,13 @@ namespace operators {
namespace math {
template <>
void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
void gemm<platform::CPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
const float* B, const float beta,
float* C) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
......@@ -33,13 +34,13 @@ void gemm<platform::CPUPlace, float>(const CBLAS_TRANSPOSE transA,
}
template <>
void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
void gemm<platform::CPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
double* C) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
int ldc = N;
......@@ -48,13 +49,10 @@ void gemm<platform::CPUPlace, double>(const CBLAS_TRANSPOSE transA,
}
template <>
void matmul<platform::CPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
void matmul<platform::CPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -74,18 +72,15 @@ void matmul<platform::CPUPlace, float>(const framework::Tensor& matrix_a,
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::CPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
void matmul<platform::CPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -105,8 +100,8 @@ void matmul<platform::CPUPlace, double>(const framework::Tensor& matrix_a,
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::CPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
......
......@@ -19,12 +19,13 @@ namespace operators {
namespace math {
template <>
void gemm<platform::GPUPlace, float>(const CBLAS_TRANSPOSE transA,
void gemm<platform::GPUPlace, float>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const float alpha, const float* A,
const float* B, const float beta, float* C,
platform::DeviceContext* context) {
const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
......@@ -35,18 +36,19 @@ void gemm<platform::GPUPlace, float>(const CBLAS_TRANSPOSE transA,
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasSgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void gemm<platform::GPUPlace, double>(const CBLAS_TRANSPOSE transA,
void gemm<platform::GPUPlace, double>(const platform::DeviceContext& context,
const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M,
const int N, const int K,
const double alpha, const double* A,
const double* B, const double beta,
double* C,
platform::DeviceContext* context) {
double* C) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (transA == CblasNoTrans) ? K : M;
......@@ -56,18 +58,16 @@ void gemm<platform::GPUPlace, double>(const CBLAS_TRANSPOSE transA,
cublasOperation_t cuTransB =
(transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
PADDLE_ENFORCE(platform::dynload::cublasDgemm(
reinterpret_cast<platform::CUDADeviceContext*>(context)->cublas_handle(),
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.cublas_handle(),
cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void matmul<platform::GPUPlace, float>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, float alpha,
framework::Tensor* matrix_out,
float beta,
platform::DeviceContext* context) {
void matmul<platform::GPUPlace, float>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha,
framework::Tensor* matrix_out, float beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -87,18 +87,15 @@ void matmul<platform::GPUPlace, float>(const framework::Tensor& matrix_a,
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, float>(
transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>(), context);
context, transA, transB, M, N, K, alpha, matrix_a.data<float>(),
matrix_b.data<float>(), beta, matrix_out->data<float>());
}
template <>
void matmul<platform::GPUPlace, double>(const framework::Tensor& matrix_a,
bool trans_a,
const framework::Tensor& matrix_b,
bool trans_b, double alpha,
framework::Tensor* matrix_out,
double beta,
platform::DeviceContext* context) {
void matmul<platform::GPUPlace, double>(
const platform::DeviceContext& context, const framework::Tensor& matrix_a,
bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha,
framework::Tensor* matrix_out, double beta) {
auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims();
......@@ -118,8 +115,8 @@ void matmul<platform::GPUPlace, double>(const framework::Tensor& matrix_a,
CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans;
gemm<platform::GPUPlace, double>(
transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>(), context);
context, transA, transB, M, N, K, alpha, matrix_a.data<double>(),
matrix_b.data<double>(), beta, matrix_out->data<double>());
}
} // namespace math
......
......@@ -66,16 +66,16 @@ namespace math {
// For more detailed info, please refer to
// http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html
template <typename Place, typename T>
void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB,
const int M, const int N, const int K, const T alpha, const T* A,
const T* B, const T beta, T* C, platform::DeviceContext* context);
void gemm(const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const int M, const int N, const int K,
const T alpha, const T* A, const T* B, const T beta, T* C);
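// A hypothetical call site (dev_ctx, A, B, C, M, N, K are assumed names, not
// symbols from this change): computing C = A * B for row-major A (M x K) and
// B (K x N) on the CPU would look like
//   math::gemm<platform::CPUPlace, float>(dev_ctx, CblasNoTrans, CblasNoTrans,
//                                         M, N, K, 1.0f, A, B, 0.0f, C);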
// matrix multiply with continuous memory
template <typename Place, typename T>
void matmul(const framework::Tensor& matrix_a, bool trans_a,
void matmul(const platform::DeviceContext& context,
const framework::Tensor& matrix_a, bool trans_a,
const framework::Tensor& matrix_b, bool trans_b, T alpha,
framework::Tensor* matrix_out, T beta,
platform::DeviceContext* context);
framework::Tensor* matrix_out, T beta);
} // namespace math
} // namespace operators
......
......@@ -15,8 +15,7 @@ TEST(math_function, notrans_mul_trans) {
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
......@@ -24,7 +23,7 @@ TEST(math_function, notrans_mul_trans) {
out_gpu.mutable_data<float>({2, 2}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0, context);
context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);
out.CopyFrom<float>(out_gpu, *cpu_place);
......@@ -33,6 +32,7 @@ TEST(math_function, notrans_mul_trans) {
EXPECT_EQ(out_ptr[1], 14);
EXPECT_EQ(out_ptr[2], 14);
EXPECT_EQ(out_ptr[3], 50);
delete gpu_place;
}
TEST(math_function, trans_mul_notrans) {
......@@ -48,8 +48,7 @@ TEST(math_function, trans_mul_notrans) {
memcpy(input1_ptr, arr, 6 * sizeof(float));
auto* gpu_place = new paddle::platform::GPUPlace(0);
paddle::platform::DeviceContext* context =
new paddle::platform::CUDADeviceContext(*gpu_place);
paddle::platform::CUDADeviceContext context(*gpu_place);
input1_gpu.CopyFrom<float>(input1, *gpu_place);
input2_gpu.CopyFrom<float>(input1, *gpu_place);
......@@ -57,7 +56,7 @@ TEST(math_function, trans_mul_notrans) {
out_gpu.mutable_data<float>({3, 3}, *gpu_place);
paddle::operators::math::matmul<paddle::platform::GPUPlace, float>(
input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0, context);
context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);
out.CopyFrom<float>(out_gpu, *cpu_place);
......@@ -71,5 +70,6 @@ TEST(math_function, trans_mul_notrans) {
EXPECT_EQ(out_ptr[6], 15);
EXPECT_EQ(out_ptr[7], 22);
EXPECT_EQ(out_ptr[8], 29);
delete gpu_place;
}
#endif
......@@ -24,7 +24,9 @@ class MeanOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of MeanOp must be initialized.");
"Input(X) of MeanOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MeanOp should not be null.");
ctx.Output<framework::LoDTensor>("Out")->Resize({1});
}
};
......
......@@ -27,6 +27,13 @@ class MinusOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of MinusOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of MinusOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MinusOp should not be null.");
auto *left_tensor = ctx.Input<framework::Tensor>("X");
auto *right_tensor = ctx.Input<framework::Tensor>("Y");
......@@ -64,7 +71,7 @@ class MinusGradOp : public NetOp {
// x_grad = out_grad
AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}},
{{"Out", {x_grad}}}, {}));
{{"Y", {x_grad}}}, {}));
framework::AttributeMap scale_attr;
scale_attr["scale"] = static_cast<AttrType>(-1);
......
......@@ -26,6 +26,13 @@ class MulOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of MulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Input(Y) of MulOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of MulOp should not be null.");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
int x_num_col_dims = Attr<int>("x_num_col_dims");
......
......@@ -46,10 +46,8 @@ class MulKernel : public framework::OpKernel {
: *y;
z->mutable_data<T>(context.GetPlace());
auto* device_context =
const_cast<platform::DeviceContext*>(context.device_context_);
math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, z, 0,
device_context);
math::matmul<Place, T>(context.device_context(), x_matrix, false, y_matrix,
false, 1, z, 0);
}
};
......@@ -71,16 +69,14 @@ class MulGradKernel : public framework::OpKernel {
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
auto* device_context =
const_cast<platform::DeviceContext*>(ctx.device_context_);
if (dx) {
dx->mutable_data<T>(ctx.GetPlace());
Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix<T>(
*dx, x_num_col_dims)
: *dx;
// dx = dout * y'. dx: M x K, dout : M x N, y : K x N
math::matmul<Place, T>(*dout, false, y_matrix, true, 1, &dx_matrix, 0,
device_context);
math::matmul<Place, T>(ctx.device_context(), *dout, false, y_matrix, true,
1, &dx_matrix, 0);
}
if (dy) {
dy->mutable_data<T>(ctx.GetPlace());
......@@ -88,8 +84,8 @@ class MulGradKernel : public framework::OpKernel {
*dy, y_num_col_dims)
: *dy;
// dy = x' * dout. dy K x N, dout : M x N, x : M x K
math::matmul<Place, T>(x_matrix, true, *dout, false, 1, &dy_matrix, 0,
device_context);
math::matmul<Place, T>(ctx.device_context(), x_matrix, true, *dout, false,
1, &dy_matrix, 0);
}
}
};
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/onehot_cross_entropy_op.h"
namespace paddle {
namespace operators {
class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X");
auto *label = ctx.Input<Tensor>("label");
PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
ctx.Output<framework::LoDTensor>("Y")->Resize({X->dims()[0], 1});
}
};
class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto dX = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto X = ctx.Input<Tensor>("X");
dX->Resize(X->dims());
}
};
class OnehotCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
public:
OnehotCrossEntropyOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of OnehotCrossEntropyOp");
AddInput("label", "The second input of OnehotCrossEntropyOp");
AddOutput("Y", "The output of OnehotCrossEntropyOp");
AddComment(R"DOC(
OnehotCrossEntropy Operator.
Y[i] = -log(X[i][label[i]]), i.e. the negative log-likelihood of the
probability that X assigns to the ground-truth class label[i] of sample i.
)DOC");
}
};
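// Worked example (numbers assumed for illustration): for predicted
// probabilities X[i] = [0.2, 0.3, 0.5] and label[i] = 2, the loss is
// Y[i] = -log(X[i][2]) = -log(0.5) ~= 0.693.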
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOp);
REGISTER_OP_CPU_KERNEL(onehot_cross_entropy,
ops::OnehotCrossEntropyOpKernel<float>);
REGISTER_OP_CPU_KERNEL(onehot_cross_entropy_grad,
ops::OnehotCrossEntropyGradientOpKernel<float>);
......@@ -25,6 +25,11 @@ class PadOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of PadOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of PadOp should not be null.");
auto x_dim = ctx.Input<Tensor>("X")->dims();
auto paddings = Attr<std::vector<int>>("paddings");
PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/prelu_op.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
class PReluOp : public framework::OperatorWithKernel {
public:
PReluOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
auto *in = ctx.Input<framework::Tensor>("X");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Alpha"),
"Input(Alpha) should not be null");
auto *alpha = ctx.Input<framework::Tensor>("Alpha");
PADDLE_ENFORCE(alpha->numel() == 1, "Size of weight Alpha must be one.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) should not be null");
auto *out = ctx.Output<framework::LoDTensor>("Out");
out->Resize(in->dims());
}
};
class PReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
PReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of prelu operator.");
AddInput("Alpha", "The alpha weight of PRelu operator.");
AddOutput("Out", "The output tensor of PRelu operator.");
AddComment(R"DOC(PRelu operator
The equation is:
f(x) = alpha * x , for x < 0
f(x) = x , for x >= 0
)DOC");
}
};
// The operator to calculate gradients of a prelu operator.
class PReluGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null");
auto *dx = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
auto *x = ctx.Input<framework::Tensor>("X");
auto *dalpha =
ctx.Output<framework::LoDTensor>(framework::GradVarName("Alpha"));
auto *alpha = ctx.Input<framework::Tensor>("Alpha");
dx->Resize(x->dims());
dalpha->Resize(alpha->dims());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(prelu, ops::PReluOp, ops::PReluOpMaker, prelu_grad,
ops::PReluGradOp);
REGISTER_OP_CPU_KERNEL(prelu,
ops::PReluKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(prelu_grad,
ops::PReluGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/prelu_op.h"
REGISTER_OP_GPU_KERNEL(
prelu, paddle::operators::PReluKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
prelu_grad,
paddle::operators::PReluGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using platform::Transform;
template <typename T>
class PReluFunctor {
public:
explicit PReluFunctor(const T* alpha) : alpha_(alpha) {}
HOSTDEVICE T operator()(const T& x) const {
if (x > 0)
return x;
else
return x * (*alpha_);
}
private:
const T* alpha_;
};
template <typename Place, typename T>
class PReluKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* out = context.Output<Tensor>("Out");
const T* x_ptr = x->data<T>();
T* o_ptr = out->mutable_data<T>(context.GetPlace());
auto* alpha_ptr = alpha->data<T>();
int numel = x->numel();
Transform<Place> trans;
trans(context.device_context(), x_ptr, x_ptr + numel, o_ptr,
PReluFunctor<T>(alpha_ptr));
}
};
template <typename T>
class PReluGradFunctor {
public:
explicit PReluGradFunctor(const T* alpha) : alpha_(alpha) {}
HOSTDEVICE T operator()(const T& out, const T& dout) const {
if (out > 0)
return dout;
else
return dout * (*alpha_);
}
private:
const T* alpha_;
};
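// Why the gradient can be driven by `out` rather than `x` (a note assuming
// alpha > 0): out = x when x > 0 and out = alpha * x < 0 when x < 0, so
// sign(out) == sign(x) and df/dx is 1 for out > 0 and alpha otherwise.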
template <typename Place, typename T>
class PReluGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
auto* out = context.Input<Tensor>("Out");
auto* alpha = context.Input<Tensor>("Alpha");
auto* alpha_ptr = alpha->data<T>();
T* dx_ptr = dx->mutable_data<T>(context.GetPlace());
const T* dout_ptr = dout->data<T>();
const T* out_ptr = out->data<T>();
int numel = dx->numel();
Transform<Place> trans;
trans(context.device_context(), out_ptr, out_ptr + numel, dout_ptr, dx_ptr,
PReluGradFunctor<T>(alpha_ptr));
// TODO(Zhuoyuan): add the dalpha update when the GPU kernels are ready
}
};
} // namespace operators
} // namespace paddle
......@@ -28,7 +28,11 @@ class ReshapeOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// input check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of ReshapeOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of ReshapeOp should not be null.");
auto shape = ctx.Attr<std::vector<int>>("shape");
PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
for (auto dim : shape) {
......
......@@ -25,6 +25,13 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of RowwiseAddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"),
"Input(b) of RowwiseAddOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of RowwiseAddOp should not be null.");
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto b_dims = ctx.Input<Tensor>("b")->dims();
PADDLE_ENFORCE_GT(
......
......@@ -27,6 +27,11 @@ class ScaleOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of ScaleOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of ScaleOp should not be null.");
auto *in = ctx.Input<framework::Tensor>("X");
auto *out = ctx.Output<framework::LoDTensor>("Out");
out->Resize(in->dims());
......
......@@ -24,6 +24,15 @@ class ScatterOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ref"),
"Input(Ref) of ScatterOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"),
"Input(Index) of ScatterOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Updates"),
"Input(Updates) of ScatterOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of ScatterOp should not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Index")->dims().size(), 1,
"Update Index should be 1-D.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Ref")->dims().size(),
......
......@@ -23,9 +23,12 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of SequenceAvgPoolOp"
"must be initialized.");
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("X"), "Input(X) of SequenceAvgPoolOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of SequenceAvgPoolOp should not be null.");
auto* x = ctx.Input<framework::LoDTensor>("X");
auto dims = x->dims();
auto lod = x->lod();
......@@ -60,7 +63,9 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Gradient of Out should not be null");
"Gradient of Out should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"The input X should not be null.");
auto og_dims =
ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->dims();
auto x_dims = ctx.Input<framework::LoDTensor>("X")->dims();
......
......@@ -21,6 +21,9 @@ namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
......@@ -43,8 +46,8 @@ class SequenceAvgPoolKernel : public framework::OpKernel {
static_cast<int>(lod[0][i + 1]));
Tensor out_t = out->Slice<T>(i, i + 1);
int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
auto in_e = EigenMatrix<T>::From(in_t, {h, w});
auto out_e = EigenMatrix<T>::From(out_t, {h, w});
auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
auto out_e = EigenVector<T>::Flatten(out_t);
out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
}
}
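// Worked example (an assumed LoD): with lod = {{0, 2, 5}} the input holds
// two sequences of lengths 2 and 3; output row 0 is the mean of input rows
// 0-1 and output row 1 is the mean of input rows 2-4.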
......@@ -54,9 +57,9 @@ template <typename Place, typename T>
class SequenceAvgPoolGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Output<LoDTensor>("X");
auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
auto* in = context.Input<LoDTensor>("X");
auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
auto dims = in->dims();
auto lod = in->lod();
......@@ -71,7 +74,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel {
int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w});
auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
Eigen::DSizes<int, 2> bcast(h, w);
Eigen::DSizes<int, 2> bcast(h, 1);
in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
}
}
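// Backward intuition: each of the h rows of a sequence contributed 1/h to
// the pooled mean, so every row receives out_grad / h -- the 1 x w row
// gradient broadcast h times along dimension 0, matching bcast(h, 1) above.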
......
......@@ -23,6 +23,13 @@ class SGDOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("param"),
"Input(param) of SGDOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("grad"),
"Input(grad) of SGDOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("param_out"),
"Output(param_out) of SGDOp should not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("param")->dims(),
ctx.Input<Tensor>("grad")->dims(),
"Two input of SGD Op's dimension must be same.");
......
......@@ -23,6 +23,11 @@ class SigmoidOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of SigmoidOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
"Output(Y) of SigmoidOp should not be null.");
ctx.Output<framework::LoDTensor>("Y")->Resize(
ctx.Input<Tensor>("X")->dims());
}
......
......@@ -23,6 +23,11 @@ class SoftmaxOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input(X) of SoftmaxOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"),
"Output(Y) of SoftmaxOp should not be null.");
PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
"The input of softmax op must be a matrix.");
ctx.Output<framework::LoDTensor>("Y")->Resize(
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/split_op.h"
#include "paddle/operators/net_op.h"
namespace paddle {
namespace operators {
using framework::Tensor;
class SplitOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// Infer each output's shape from Attr("num") or Attr("sections").
auto *in = ctx.Input<framework::Tensor>("X");
auto outs = ctx.MultiOutput<framework::LoDTensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t num = static_cast<size_t>(ctx.Attr<int>("num"));
std::vector<int> sections =
static_cast<std::vector<int>>(ctx.Attr<std::vector<int>>("sections"));
const size_t n = outs.size();
if (num > 0) {
int64_t in_axis_dim = in->dims()[axis];
PADDLE_ENFORCE_EQ(in_axis_dim % num, 0,
"tensor split does not result"
" in an equal division");
size_t out_axis_dim = in_axis_dim / num;
for (size_t i = 0; i < n; ++i) {
auto dim = in->dims();
dim[axis] = out_axis_dim;
outs[i]->Resize(dim);
}
} else if (sections.size() > 0) {
PADDLE_ENFORCE_EQ(sections.size(), n,
"tensor split sections size"
"should be equal to output size.");
for (size_t i = 0; i < n; ++i) {
auto dim = in->dims();
dim[axis] = sections[i];
outs[i]->Resize(dim);
}
} else {
PADDLE_ENFORCE_NOT_NULL(nullptr,
"split operator should specify indices or sections.");
}
}
};
class SplitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SplitOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "the input tensor of split operator.");
AddOutput("Out", "the output tensors of split operator.").AsDuplicable();
AddComment(R"DOC(
Split the input tensor into multiple sub-tensors.
Example:
Input = [[1,2],
[3,4],
[5,6]]
sections = [2,1]
axis = 0
Output[0] = [[1,2],
[3,4]]
Output[1] = [[5,6]]
)DOC");
AddAttr<std::vector<int>>("sections",
"the length for each"
"output along with the specify axis.")
.SetDefault(std::vector<int>{});
AddAttr<int>("num",
"number of the sub-tensors, it must evenly divide "
"Input.dims()[axis]")
.SetDefault(0);
AddAttr<int>("axis", "The axis which the input will be splited on.")
.SetDefault(0);
}
};
class SplitOpGrad : public NetOp {
public:
SplitOpGrad(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
auto out_grad = Inputs(framework::GradVarName("Out"));
auto x_grad = Output(framework::GradVarName("X"));
AppendOp(framework::OpRegistry::CreateOp("concat", {{"X", out_grad}},
{{"Out", {x_grad}}}, attrs));
CompleteAddOp(false);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
USE_CPU_ONLY_OP(concat);
REGISTER_OP(split, ops::SplitOp, ops::SplitOpMaker, split_grad,
ops::SplitOpGrad);
REGISTER_OP_CPU_KERNEL(split,
ops::SplitKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class SplitKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto outs = ctx.MultiOutput<framework::Tensor>("Out");
int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
size_t before = 1, after = 1;
const size_t n = outs.size();
size_t input_axis_dim = in->dims()[axis];
for (int64_t i = 0; i < in->dims().size(); ++i) {
if (i == axis) {
continue;
}
if (i < axis) {
before *= in->dims()[i];
} else {
after *= in->dims()[i];
}
}
size_t input_offset = 0;
for (size_t i = 0; i < n; i++) {
auto& out = outs[i];
size_t axis_dim = out->dims()[axis];
for (size_t j = 0; j < before; j++) {
size_t len = axis_dim * after * sizeof(T);
T* dest =
out->mutable_data<T>(platform::CPUPlace()) + axis_dim * after * j;
const T* src =
in->data<T>() + input_offset + input_axis_dim * after * j;
memcpy(dest, src, len);
}
input_offset += axis_dim * after;
}
}
};
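// Worked example (sizes assumed): splitting an input of dims [3, 4, 5] on
// axis = 1 with sections = {1, 3} gives before = 3 and after = 5, producing
// outputs of dims [3, 1, 5] and [3, 3, 5]; for each output, every one of the
// `before` slabs copies axis_dim * after contiguous elements.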
} // namespace operators
} // namespace paddle
......@@ -23,12 +23,18 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of SquaredL2DistanceOp "
"must be initialized.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Target of SquaredL2DistanceOp "
"must be initialized.");
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("X"),
"Input(X) of SquaredL2DistanceOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.InputVar("Y"),
"Input(Y) of SquaredL2DistanceOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("sub_result"),
"Output(sub_result) of SquaredL2DistanceOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of SquaredL2DistanceOp should not be null.");
auto* x = ctx.Input<Tensor>("X");
auto x_dims = x->dims();
......
......@@ -22,6 +22,11 @@ class SumOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(!ctx.MultiInputVar("X").empty(),
"Input(X) of SumOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of SumOp should not be null.");
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::LoDTensor>("Out");
int N = ins.size();
......
......@@ -24,7 +24,12 @@ class TopkOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of TopkOP must be initialized.");
"Input(X) of TopkOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
"Output(Out) of TopkOp should not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Indices"),
"Output(Indices) of TopkOp should not be null.");
auto *input = ctx.Input<framework::Tensor>("X");
const int k = static_cast<int>(ctx.Attr<int>("k"));
......
......@@ -48,6 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(
ctx.OutputVar("Out"),
"Output(Out) of UniformRandomOp should not be null.");
PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
"uniform_random's min must less then max");
auto* tensor = ctx.Output<framework::LoDTensor>("Out");
......
......@@ -24,4 +24,4 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator
nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info)
nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place)
nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
......@@ -24,6 +24,11 @@ namespace platform {
#define USE_CUDA_ATOMIC(op, T) \
CUDA_ATOMIC_WRAPPER(op, T) { return atomic##op(address, val); }
// Default thread count per block (or block size).
// TODO(typhoonzero): need to benchmark against setting this value
// to 1024.
constexpr int PADDLE_CUDA_NUM_THREADS = 512;
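// A typical launch configuration derived from this constant (a sketch;
// SomeKernel, n and stream are placeholders, not symbols from this change):
//   int grid = (n + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
//   SomeKernel<<<grid, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(...);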
// For atomicAdd.
USE_CUDA_ATOMIC(Add, float);
......
......@@ -101,19 +101,17 @@ CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) {
eigen_stream_.reset(new EigenCudaStreamDevice());
eigen_stream_->Reinitialize(&stream_, place);
eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
}
CUDADeviceContext::~CUDADeviceContext() {
SetDeviceId(place_.device);
Wait();
if (cublas_handle_) {
PADDLE_ENFORCE(dynload::cublasDestroy(cublas_handle_));
}
if (cudnn_handle_) {
PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
}
PADDLE_ENFORCE(dynload::cublasDestroy(cublas_handle_));
PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
eigen_stream_.reset();
eigen_device_.reset();
PADDLE_ENFORCE(cudaStreamDestroy(stream_));
......@@ -129,25 +127,13 @@ Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
return eigen_device_.get();
}
cublasHandle_t CUDADeviceContext::cublas_handle() {
if (!cublas_handle_) {
SetDeviceId(place_.device);
PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
}
cublasHandle_t CUDADeviceContext::cublas_handle() const {
return cublas_handle_;
}
cudnnHandle_t CUDADeviceContext::cudnn_handle() {
if (!cudnn_handle_) {
SetDeviceId(place_.device);
PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
}
return cudnn_handle_;
}
cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; }
cudaStream_t CUDADeviceContext::stream() { return stream_; }
cudaStream_t CUDADeviceContext::stream() const { return stream_; }
#endif // PADDLE_ONLY_CPU
......
......@@ -67,16 +67,14 @@ class CUDADeviceContext : public DeviceContext {
/*! \brief Return eigen device in the device context. */
Eigen::GpuDevice* eigen_device() const;
// clang-format off
/*! \brief Return cublas handle in the device context. */
cublasHandle_t cublas_handle();
cublasHandle_t cublas_handle() const;
/*! \brief Return cudnn handle in the device context. */
cudnnHandle_t cudnn_handle();
cudnnHandle_t cudnn_handle() const;
/*! \brief Return cuda stream in the device context. */
cudaStream_t stream();
// clang-format on
cudaStream_t stream() const;
private:
GPUPlace place_;
......@@ -84,11 +82,9 @@ class CUDADeviceContext : public DeviceContext {
std::unique_ptr<Eigen::GpuDevice> eigen_device_;
std::unique_ptr<EigenCudaStreamDevice> eigen_stream_;
// clang-format off
cudaStream_t stream_{nullptr};
cudnnHandle_t cudnn_handle_{nullptr};
cublasHandle_t cublas_handle_{nullptr};
// clang-format on
cudaStream_t stream_;
cudnnHandle_t cudnn_handle_;
cublasHandle_t cublas_handle_;
};
#endif
......
......@@ -14,6 +14,7 @@
#pragma once
#include "paddle/platform/device_context.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/hostdevice.h"
#include "paddle/platform/place.h"
......@@ -21,46 +22,78 @@
#include <algorithm>
#include <type_traits>
#ifdef __NVCC__
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include "paddle/platform/details/device_ptr_cast.h"
#endif
namespace paddle {
namespace platform {
// Transform on host or device. It provides the same API in std library.
template <typename Place, typename InputIter, typename OutputIter,
typename UnaryOperation>
void Transform(Place place, InputIter first, InputIter last, OutputIter result,
UnaryOperation op) {
if (is_cpu_place(place)) {
template <typename Place>
struct Transform {
template <typename InputIter, typename OutputIter, typename UnaryOperation>
void operator()(const DeviceContext& context, InputIter first, InputIter last,
OutputIter result, UnaryOperation op);
template <typename InputIter1, typename InputIter2, typename OutputIter,
typename BinaryOperation>
void operator()(const DeviceContext& context, InputIter1 first1,
InputIter1 last1, InputIter2 first2, OutputIter result,
BinaryOperation op);
};
template <>
struct Transform<platform::CPUPlace> {
template <typename InputIter, typename OutputIter, typename UnaryOperation>
void operator()(const DeviceContext& context, InputIter first, InputIter last,
OutputIter result, UnaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
std::transform(first, last, result, op);
} else {
#ifdef __NVCC__
using namespace details;
thrust::transform(DevPtrCast(first), DevPtrCast(last), DevPtrCast(result),
op);
#else
PADDLE_THROW("Do not invoke `Transform<GPUPlace>` in .cc file");
#endif
}
}
template <typename Place, typename InputIter1, typename InputIter2,
typename OutputIter, typename BinaryOperation>
void Transform(Place place, InputIter1 first1, InputIter1 last1,
InputIter2 first2, OutputIter result, BinaryOperation op) {
if (is_cpu_place(place)) {
template <typename InputIter1, typename InputIter2, typename OutputIter,
typename BinaryOperation>
void operator()(const DeviceContext& context, InputIter1 first1,
InputIter1 last1, InputIter2 first2, OutputIter result,
BinaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_cpu_place(place), "It must use CPU place.");
std::transform(first1, last1, first2, result, op);
} else {
}
};
#ifdef __NVCC__
using namespace details;
thrust::transform(DevPtrCast(first1), DevPtrCast(last1), DevPtrCast(first2),
DevPtrCast(result), op);
#else
PADDLE_THROW("Do not invoke `Transform<GPUPlace>` in .cc file");
#endif
template <>
struct Transform<platform::GPUPlace> {
template <typename InputIter, typename OutputIter, typename UnaryOperation>
void operator()(const DeviceContext& context, InputIter first, InputIter last,
OutputIter result, UnaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place.");
auto& ctx = reinterpret_cast<const CUDADeviceContext&>(context);
thrust::transform(thrust::cuda::par.on(ctx.stream()),
details::DevPtrCast(first), details::DevPtrCast(last),
details::DevPtrCast(result), op);
}
template <typename InputIter1, typename InputIter2, typename OutputIter,
typename BinaryOperation>
void operator()(const DeviceContext& context, InputIter1 first1,
InputIter1 last1, InputIter2 first2, OutputIter result,
BinaryOperation op) {
auto place = context.GetPlace();
PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place.");
auto& ctx = reinterpret_cast<const CUDADeviceContext&>(context);
thrust::transform(thrust::cuda::par.on(ctx.stream()),
details::DevPtrCast(first1), details::DevPtrCast(last1),
details::DevPtrCast(first2), details::DevPtrCast(result),
op);
}
};
#endif
} // namespace platform
} // namespace paddle
......@@ -15,6 +15,7 @@
#include <gtest/gtest.h>
#include "paddle/memory/memcpy.h"
#include "paddle/memory/memory.h"
#include "paddle/platform/hostdevice.h"
#include "paddle/platform/transform.h"
template <typename T>
......@@ -36,8 +37,10 @@ class Multiply {
TEST(Transform, CPUUnary) {
using namespace paddle::platform;
CPUDeviceContext ctx;
float buf[4] = {0.1, 0.2, 0.3, 0.4};
Transform(CPUPlace(), buf, buf + 4, buf, Scale<float>(10));
Transform<paddle::platform::CPUPlace> trans;
trans(ctx, buf, buf + 4, buf, Scale<float>(10));
for (int i = 0; i < 4; ++i) {
ASSERT_NEAR(buf[i], static_cast<float>(i + 1), 1e-5);
}
......@@ -47,10 +50,13 @@ TEST(Transform, GPUUnary) {
using namespace paddle::platform;
using namespace paddle::memory;
GPUPlace gpu0(0);
CUDADeviceContext ctx(gpu0);
float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4};
float* gpu_buf = static_cast<float*>(Alloc(gpu0, sizeof(float) * 4));
Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf));
Transform(gpu0, gpu_buf, gpu_buf + 4, gpu_buf, Scale<float>(10));
Transform<paddle::platform::GPUPlace> trans;
trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale<float>(10));
ctx.Wait();
Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf));
Free(gpu0, gpu_buf);
for (int i = 0; i < 4; ++i) {
......@@ -62,7 +68,9 @@ TEST(Transform, CPUBinary) {
using namespace paddle::platform;
using namespace paddle::memory;
int buf[4] = {1, 2, 3, 4};
Transform(CPUPlace(), buf, buf + 4, buf, buf, Multiply<int>());
Transform<paddle::platform::CPUPlace> trans;
CPUDeviceContext ctx;
trans(ctx, buf, buf + 4, buf, buf, Multiply<int>());
for (int i = 0; i < 4; ++i) {
ASSERT_EQ((i + 1) * (i + 1), buf[i]);
}
......@@ -73,12 +81,15 @@ TEST(Transform, GPUBinary) {
using namespace paddle::memory;
int buf[4] = {1, 2, 3, 4};
GPUPlace gpu0(0);
CUDADeviceContext ctx(gpu0);
int* gpu_buf = static_cast<int*>(Alloc(gpu0, sizeof(buf)));
Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf));
Transform(gpu0, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply<int>());
Transform<paddle::platform::GPUPlace> trans;
trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply<int>());
ctx.Wait();
Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf));
Free(gpu0, gpu_buf);
for (int i = 0; i < 4; ++i) {
ASSERT_EQ((i + 1) * (i + 1), buf[i]);
}
}
\ No newline at end of file
}
......@@ -45,14 +45,18 @@ add_dependencies(paddle_pserver paddle_proto ${external_project_dependencies})
set(PSERVER_MAIN_SOURCES
ParameterServer2Main.cpp)
add_executable(paddle_pserver_main
${PSERVER_MAIN_SOURCES})
link_paddle_exe(paddle_pserver_main)
if(WITH_TESTING)
add_subdirectory(test)
endif()
install(TARGETS paddle_pserver_main
RUNTIME DESTINATION opt/paddle/bin
PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
set_target_properties(paddle_pserver_main PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
if(NOT WITH_C_API)
add_executable(paddle_pserver_main ${PSERVER_MAIN_SOURCES})
link_paddle_exe(paddle_pserver_main)
install(TARGETS paddle_pserver_main
RUNTIME DESTINATION opt/paddle/bin
PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
set_target_properties(paddle_pserver_main PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
endif()
if(WITH_PYTHON)
cc_library(paddle_pybind SHARED
cc_library(paddle_pybind SHARED
SRCS pybind.cc
DEPS pybind python backward
${GLOB_OP_LIB})
......
#!/bin/bash
set -e
# Create the build directory for CMake.
mkdir -p $TRAVIS_BUILD_DIR/build_ios
cd $TRAVIS_BUILD_DIR/build_ios
# Compile paddle binaries
cmake -DCMAKE_SYSTEM_NAME=iOS \
-DIOS_PLATFORM=OS \
-DCMAKE_OSX_ARCHITECTURES="arm64" \
-DWITH_C_API=ON \
-DUSE_EIGEN_FOR_BLAS=ON \
-DWITH_TESTING=OFF \
-DWITH_SWIG_PY=OFF \
-DWITH_STYLE_CHECK=OFF \
-DCMAKE_BUILD_TYPE=Release \
..
make -j 2
......@@ -8,6 +8,12 @@ function abort(){
trap 'abort' 0
set -e
# install glide
curl https://glide.sh/get | bash
eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
go get -u github.com/alecthomas/gometalinter
gometalinter --install
cd $TRAVIS_BUILD_DIR
export PATH=/usr/bin:$PATH
pre-commit install
......
......@@ -50,22 +50,22 @@ macro(add_paddle_exe TARGET_NAME)
link_paddle_exe(${TARGET_NAME})
endmacro()
add_paddle_exe(paddle_trainer
TrainerMain.cpp)
add_paddle_exe(paddle_merge_model
MergeModel.cpp)
if(WITH_TESTING)
add_subdirectory(tests)
add_subdirectory(tests)
endif()
install(TARGETS paddle_trainer paddle_merge_model
RUNTIME DESTINATION opt/paddle/bin
PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
set_target_properties(paddle_trainer PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
set_target_properties(paddle_merge_model PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
if(NOT WITH_C_API)
add_paddle_exe(paddle_trainer TrainerMain.cpp)
add_paddle_exe(paddle_merge_model MergeModel.cpp)
install(TARGETS paddle_trainer paddle_merge_model
RUNTIME DESTINATION opt/paddle/bin
PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
set_target_properties(paddle_trainer PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
set_target_properties(paddle_merge_model PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE)
endif()
if(APPLE)
set(CMAKE_EXE_LINKER_FLAGS "-framework CoreFoundation -framework Security")
......@@ -73,6 +73,8 @@ endif()
if(WITH_GOLANG)
add_dependencies(paddle_trainer_lib paddle_pserver_cclient)
target_link_libraries(paddle_trainer paddle_pserver_cclient)
target_link_libraries(paddle_trainer_lib paddle_pserver_cclient)
if(NOT WITH_C_API)
target_link_libraries(paddle_trainer paddle_pserver_cclient)
endif()
endif(WITH_GOLANG)
......@@ -17,7 +17,8 @@ limitations under the License. */
#include <fenv.h>
#if defined(__APPLE__) || defined(__OSX__)
#if (defined(__APPLE__) || defined(__OSX__)) && !defined(__arm__) && \
!defined(__aarch64__)
int fegetexcept(void);
int feenableexcept(unsigned int excepts);
......
......@@ -40,6 +40,8 @@ void Semaphore::wait() { sem_wait(&m->sem); }
void Semaphore::post() { sem_post(&m->sem); }
/// SpinLockPrivate
#ifdef PADDLE_USE_PTHREAD_SPINLOCK
class SpinLockPrivate {
......@@ -79,6 +81,8 @@ SpinLock::~SpinLock() { delete m; }
void SpinLock::lock() { m->lock(); }
void SpinLock::unlock() { m->unlock(); }
/// ThreadBarrierPrivate
#ifdef PADDLE_USE_PTHREAD_BARRIER
class ThreadBarrierPrivate {
......@@ -136,6 +140,8 @@ public:
#endif
/// ThreadBarrier
ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate(count)) {}
ThreadBarrier::~ThreadBarrier() { delete m; }
void ThreadBarrier::wait() { m->wait(); }
......
......@@ -14,7 +14,8 @@ limitations under the License. */
#include "paddle/utils/Excepts.h"
#if defined(__APPLE__) || defined(__OSX__)
#if (defined(__APPLE__) || defined(__OSX__)) && !defined(__arm__) && \
!defined(__aarch64__)
int fegetexcept(void) {
static fenv_t fenv;
......
......@@ -2286,8 +2286,15 @@ class NormLayer(LayerBase):
@config_layer('pool')
class PoolLayer(LayerBase):
layer_type = 'pool'
def __init__(self, name, inputs, ceil_mode=True, **xargs):
super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
if self.layer_type == "mkldnn_pool":
config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN")
self.layer_type = 'mkldnn_pool' if use_mkldnn else 'pool'
super(PoolLayer, self).__init__(
name, self.layer_type, 0, inputs=inputs, **xargs)
for input_index in xrange(len(self.inputs)):
input_layer = self.get_input_layer(input_index)
pool_conf = self.config.inputs[input_index].pool_conf
......@@ -2297,6 +2304,11 @@ class PoolLayer(LayerBase):
pool_conf.channels)
@config_layer('mkldnn_pool')
class MKLDNNPoolLayer(PoolLayer):
layer_type = 'mkldnn_pool'
@config_layer('pool3d')
class Pool3DLayer(LayerBase):
def __init__(self, name, inputs, ceil_mode=True, **xargs):
......
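To make the pooling dispatch above concrete, here is a small illustrative sketch (plain Python, not the PaddlePaddle config API; the function name resolve_pool_type is hypothetical) of the rule the new PoolLayer/MKLDNNPoolLayer code implements:

.. code-block:: python

    def resolve_pool_type(requested, use_mkldnn):
        # Mirrors the layer_type logic above: declaring 'mkldnn_pool'
        # without use_mkldnn fails the config_assert, while with
        # use_mkldnn set a plain 'pool' is upgraded to 'mkldnn_pool'.
        if requested == 'mkldnn_pool' and not use_mkldnn:
            raise AssertionError("mkldnn_pool only support MKLDNN")
        return 'mkldnn_pool' if use_mkldnn else 'pool'

    assert resolve_pool_type('pool', use_mkldnn=1) == 'mkldnn_pool'
    assert resolve_pool_type('pool', use_mkldnn=0) == 'pool'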
......@@ -781,11 +781,11 @@ class MixedLayerType(LayerOutput):
:type size: int
:param act: activation type.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will
get a default Bias.
:type bias_attr: ParameterAttribute or None means has bias. Any other
type means no bias.
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute or None
"""
......@@ -881,10 +881,11 @@ def mixed_layer(size=0,
then this function will just return layer's name.
:param act: Activation Type.
:type act: BaseActivation
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute or None or bool
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: The extra layer config. Default is None.
:type layer_attr: ExtraLayerAttribute
:return: MixedLayerType object can add inputs or layer name.
......@@ -920,7 +921,7 @@ def data_layer(name, size, depth=None, height=None, width=None,
data = data_layer(name="input", size=1000)
:param name: Name of this data layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param size: Size of this data layer.
:type size: int
......@@ -960,7 +961,7 @@ def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None):
"""
Define a embedding Layer.
:param name: Name of this embedding layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer for this embedding. NOTE: must be Index Data.
:type input: LayerOutput
......@@ -1015,7 +1016,7 @@ def fc_layer(input,
with mixed_layer(size=1024) as fc:
fc += full_matrix_projection(input=layer)
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. Could be a list/tuple of input layer.
:type input: LayerOutput|list|tuple
......@@ -1025,10 +1026,11 @@ def fc_layer(input,
:type act: BaseActivation
:param param_attr: The Parameter Attribute|list.
:type param_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute|None|Any
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:return: LayerOutput object.
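Since this bias_attr contract recurs throughout layers.py, a brief hedged sketch of the three accepted forms may help; the upstream layer `prev` and the sizes are assumptions, not part of the diff:

.. code-block:: python

    # `prev` is an assumed upstream LayerOutput.
    fc_no_bias   = fc_layer(input=prev, size=128, bias_attr=False)  # no bias at all
    fc_zero_bias = fc_layer(input=prev, size=128, bias_attr=True)   # bias initialized to zero
    fc_custom    = fc_layer(input=prev, size=128,
                            bias_attr=ParameterAttribute(initial_std=0.0))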
......@@ -1065,7 +1067,7 @@ def printer_layer(input, format=None, name=None):
"""
Print the output value of input layers. This layer is useful for debugging.
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. Could be a list/tuple of input layer.
:type input: LayerOutput|list|tuple
......@@ -1103,7 +1105,7 @@ def priorbox_layer(input,
"""
Compute the priorbox and set the variance. This layer is necessary for ssd.
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput
......@@ -1152,7 +1154,7 @@ def multibox_loss_layer(input_loc,
"""
Compute the location loss and the confidence loss for ssd.
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input_loc: The input predict locations.
:type input_loc: LayerOutput | List of LayerOutput
......@@ -1224,10 +1226,10 @@ def detection_output_layer(input_loc,
name=None):
"""
Apply the NMS to the output of network and compute the predict bounding
box location. The output of this layer could be None if there is no valid
bounding box.
box location. The output's shape of this layer could be zero if there is
no valid bounding box.
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input_loc: The input predict locations.
:type input_loc: LayerOutput | List of LayerOutput.
......@@ -1299,7 +1301,7 @@ def cross_channel_norm_layer(input, name=None, param_attr=None):
a conv layer's output and scale the output by a group of trainable
factors which dimensions equal to the channel's number.
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput
......@@ -1364,7 +1366,7 @@ def pooling_layer(input,
:param agg_level: AggregateLevel.TO_NO_SEQUENCE or
AggregateLevel.TO_SEQUENCE
:type agg_level: AggregateLevel
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layer name.
:type input: LayerOutput
......@@ -1373,8 +1375,11 @@ def pooling_layer(input,
:type pooling_type: BasePoolingType|None
:param stride: The step size between successive pooling regions.
:type stride: Int
:param bias_attr: Bias parameter attribute. False if no bias.
:type bias_attr: ParameterAttribute|None|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: The Extra Attributes for layer, such as dropout.
:type layer_attr: ExtraLayerAttribute|None
:return: LayerOutput object.
......@@ -1471,10 +1476,11 @@ def lstmemory(input,
:type gate_act: BaseActivation
:param state_act: state activation type, TanhActivation by default.
:type state_act: BaseActivation
:param bias_attr: Bias attribute. None means default bias. False means no
bias.
:type bias_attr: ParameterAttribute|None|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: Parameter Attribute.
:type param_attr: ParameterAttribute|None|False
:param layer_attr: Extra Layer attribute
......@@ -1596,9 +1602,11 @@ def grumemory(input,
This activation affects the :math:`z_t` and :math:`r_t`. It is the
:math:`\\sigma` in the above formula.
:type gate_act: BaseActivation
:param bias_attr: Bias attribute. None means default bias. False means no
bias.
:type bias_attr: ParameterAttribute|None|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: Parameter Attribute.
:type param_attr: ParameterAttribute|None|False
:param layer_attr: Extra Layer attribute
......@@ -1657,7 +1665,7 @@ def last_seq(input,
seq = last_seq(input=layer)
:param agg_level: Aggregated level
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:type input: LayerOutput
......@@ -1713,7 +1721,7 @@ def first_seq(input,
seq = first_seq(input=layer)
:param agg_level: aggregation level
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:type input: LayerOutput
......@@ -1792,11 +1800,13 @@ def expand_layer(input,
:type input: LayerOutput
:param expand_as: Expand as this layer's sequence info.
:type expand_as: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param bias_attr: Bias attribute. None means default bias. False means no
bias.
:type bias_attr: ParameterAttribute|None|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param expand_level: whether input layer is timestep(default) or sequence.
:type expand_level: ExpandLevel
:param layer_attr: extra layer attributes.
......@@ -1849,7 +1859,7 @@ def repeat_layer(input,
:type input: LayerOutput
:param num_repeats: Repeat the input so many times
:type num_repeats: int
:param name: Layer name.
:param name: The name of this layer. It is optional.
:param as_row_vector: True for treating input as row vector and repeating
in the column direction. This is equivalent to applying
concat_layer() with num_repeats copies of the same input.
......@@ -1908,16 +1918,17 @@ def seq_reshape_layer(input,
:type input: LayerOutput
:param reshape_size: the size of reshaped sequence.
:type reshape_size: int
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param act: Activation type.
:type act: BaseActivation
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute or None or bool
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -1960,7 +1971,7 @@ def interpolation_layer(input, weight, name=None, layer_attr=None):
:type input: list|tuple
:param weight: Weight layer.
:type weight: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -2065,7 +2076,7 @@ def power_layer(input, weight, name=None, layer_attr=None):
:type input: LayerOutput
:param weight: Weight layer.
:type weight: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -2109,7 +2120,7 @@ def scaling_layer(input, weight, name=None, layer_attr=None):
:type input: LayerOutput
:param weight: Weight layer.
:type weight: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -2147,7 +2158,7 @@ def trans_layer(input, name=None, layer_attr=None):
:param input: Input layer.
:type input: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -2187,7 +2198,7 @@ def rotate_layer(input, height, width, name=None, layer_attr=None):
:type input: LayerOutput
:param height: The height of the sample matrix
:type height: int
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -2232,7 +2243,7 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
cos = cos_sim(a=layer1, b=layer2, size=3)
:param name: layer name
:param name: The name of this layer. It is optional.
:type name: basestring
:param a: input layer a
:type a: LayerOutput
......@@ -2299,11 +2310,13 @@ def hsigmoid(input,
:type label: LayerOutput
:param num_classes: number of classes.
:type num_classes: int|None
:param name: layer name
:param name: The name of this layer. It is optional.
:type name: basestring
:param bias_attr: Bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: Parameter Attribute. None means default parameter.
:type param_attr: ParameterAttribute|None
:param layer_attr: Extra Layer Attribute.
......@@ -2411,7 +2424,7 @@ def img_conv_layer(input,
bias_attr=False,
act=ReluActivation())
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Layer Input.
:type input: LayerOutput
......@@ -2442,9 +2455,11 @@ def img_conv_layer(input,
:type dilation: int|tuple|list
:param dilation_y: The y dimension of the dilation.
:type dilation_y: int
:param bias_attr: Convolution bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:type num_channels: int
......@@ -2835,7 +2850,7 @@ def spp_layer(input,
num_channels=16,
pool_type=MaxPooling())
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: layer's input.
:type input: LayerOutput
......@@ -2929,7 +2944,7 @@ def img_cmrnorm_layer(input,
norm = img_cmrnorm_layer(input=net, size=5)
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param input: layer's input.
:type input: LayerOutput
......@@ -2992,7 +3007,7 @@ def batch_norm_layer(input,
norm = batch_norm_layer(input=net, act=ReluActivation())
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: batch normalization input. Better be linear activation.
Because there is an activation inside batch_normalization.
......@@ -3016,7 +3031,7 @@ def batch_norm_layer(input,
:type num_channels: int
:param bias_attr: :math:`\\beta`, better be zero when initialize. So the
initial_std=0, initial_mean=1 is best practice.
:type bias_attr: ParameterAttribute
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: :math:`\\gamma`, better be one when initialize. So the
initial_std=0, initial_mean=1 is best practice.
:type param_attr: ParameterAttribute
......@@ -3091,7 +3106,7 @@ def sum_to_one_norm_layer(input, name=None, layer_attr=None):
:param input: Input layer.
:type input: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -3127,7 +3142,7 @@ def row_l2_norm_layer(input, name=None, layer_attr=None):
:param input: Input layer.
:type input: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -3179,16 +3194,18 @@ def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
dropout here.
Please refer to dropout_layer for details.
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layers. It could be a LayerOutput or list/tuple of
LayerOutput.
:type input: LayerOutput|list|tuple
:param act: Activation Type, default is tanh.
:type act: BaseActivation
:param bias_attr: Bias attribute. If False, means no bias. None is default
bias.
:type bias_attr: ParameterAttribute|bool
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -3237,7 +3254,7 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
concat = concat_layer(input=[layer1, layer2])
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layers or projections
:type input: list|tuple|collections.Sequence
......@@ -3330,7 +3347,7 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
concat = seq_concat_layer(a=layer1, b=layer2)
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param a: input sequence layer
:type a: LayerOutput
......@@ -3340,10 +3357,11 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
:type act: BaseActivation
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute or None or bool
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -3506,7 +3524,7 @@ def lstm_step_layer(input,
output is :math:`o_t`, whose name is 'state' and can use
:code:`get_output_layer` to extract this output.
:param name: Layer's name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param size: Layer's size. NOTE: lstm layer's size, should be equal to
:code:`input.size/4`, and should be equal to
......@@ -3524,8 +3542,11 @@ def lstm_step_layer(input,
:param state_act: State Activation Type. Default is sigmoid, and should
be sigmoid only.
:type state_act: BaseActivation
:param bias_attr: Bias Attribute.
:type bias_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: layer's extra attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
......@@ -3576,9 +3597,13 @@ def gru_step_layer(input,
:param output_mem:
:param size:
:param act:
:param name:
:param name: The name of this layer. It is optional.
:param gate_act:
:param bias_attr:
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: the parameter_attribute for transforming the output_mem
from previous step.
:param layer_attr:
......@@ -3632,10 +3657,14 @@ def gru_step_naive_layer(input,
:param input:
:param output_mem:
:param size:
:param name:
:param name: The name of this layer. It is optional.
:param act:
:param gate_act:
:param bias_attr:
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr:
:param layer_attr:
:return:
......@@ -3691,7 +3720,7 @@ def get_output_layer(input, arg_name, name=None, layer_attr=None):
output besides the default one, please use get_output_layer first to get
the output from input.
:param name: Layer's name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: get output layer's input. And this layer should contains
multiple outputs.
......@@ -3757,11 +3786,14 @@ def recurrent_layer(input,
:type input: LayerOutput
:param act: activation.
:type act: BaseActivation
:param bias_attr: bias attribute.
:type bias_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param param_attr: parameter attribute.
:type param_attr: ParameterAttribute
:param name: name of the layer
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: Layer Attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -4000,7 +4032,7 @@ def maxid_layer(input, name=None, layer_attr=None):
:param input: Input layer name.
:type input: LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -4033,7 +4065,7 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None):
out_prod = out_prod_layer(input1=vec1, input2=vec2)
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input1: The first input layer name.
:type input: LayerOutput
......@@ -4074,7 +4106,7 @@ def eos_layer(input, eos_id, name=None, layer_attr=None):
eos = eos_layer(input=layer, eos_id=id)
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Input layer name.
:type input: LayerOutput
......@@ -4265,7 +4297,7 @@ def square_error_cost(input,
cost = \\sum_{i=1}^N(t_i-y_i)^2
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Network prediction.
:type input: LayerOutput
......@@ -4307,7 +4339,7 @@ def classification_cost(input,
"""
classification cost Layer.
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layer name. network output.
:type input: LayerOutput
......@@ -4611,7 +4643,7 @@ def pad_layer(input,
:type pad_w: list|None
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:param name: layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -4679,7 +4711,7 @@ def conv_shift_layer(a, b, name=None, layer_attr=None):
conv_shift = conv_shift_layer(a=layer1, b=layer2)
:param name: layer name
:param name: The name of this layer. It is optional.
:type name: basestring
:param a: Input layer a.
:type a: LayerOutput
......@@ -4735,7 +4767,7 @@ def tensor_layer(a,
tensor = tensor_layer(a=layer1, b=layer2, size=1000)
:param name: layer name
:param name: The name of this layer. It is optional.
:type name: basestring
:param a: Input layer a.
:type a: LayerOutput
......@@ -4747,10 +4779,11 @@ def tensor_layer(a,
:type act: BaseActivation
:param param_attr: The Parameter Attribute.
:type param_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute|None|Any
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:return: LayerOutput object.
......@@ -4797,7 +4830,7 @@ def selective_fc_layer(input,
sel_fc = selective_fc_layer(input=input, size=128, act=TanhActivation())
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput|list|tuple
......@@ -4811,10 +4844,11 @@ def selective_fc_layer(input,
:type act: BaseActivation
:param param_attr: The Parameter Attribute.
:type param_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If no bias, then pass False or
something not type of ParameterAttribute. None will get a
default Bias.
:type bias_attr: ParameterAttribute|None|Any
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
:return: LayerOutput object.
......@@ -4870,7 +4904,7 @@ def sampling_id_layer(input, name=None, layer_attr=None):
:param input: The input layer.
:type input: LayerOutput
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
......@@ -4908,7 +4942,7 @@ def slope_intercept_layer(input,
:param input: The input layer.
:type input: LayerOutput
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param slope: the scale factor.
:type slope: float.
......@@ -4972,7 +5006,7 @@ def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None):
:type vectors: LayerOutput
:param size: the dimension of this layer.
:type size: int
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
......@@ -5055,7 +5089,7 @@ def block_expand_layer(input,
:type padding_x: int
:param padding_y: The padding size in vertical direction.
:type padding_y: int
:param name: The name of this layer, which can not specify.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
......@@ -5124,7 +5158,7 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None):
:type num_channels: int|None
:param groups: The group number of input layer.
:type groups: int
:param name: The name of this layer, which can not specify.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -5188,7 +5222,7 @@ def ctc_layer(input,
:type label: LayerOutput
:param size: category numbers + 1.
:type size: int
:param name: The name of this layer
:param name: The name of this layer. It is optional.
:type name: basestring|None
:param norm_by_times: Whether to normalize by times. False by default.
:type norm_by_times: bool
......@@ -5265,7 +5299,7 @@ def warp_ctc_layer(input,
:type label: LayerOutput
:param size: category numbers + 1.
:type size: int
:param name: The name of this layer, which can not specify.
:param name: The name of this layer. It is optional.
:type name: basestring|None
:param blank: the 'blank' label used in ctc
:type blank: int
......@@ -5329,7 +5363,7 @@ def crf_layer(input,
:type weight: LayerOutput
:param param_attr: Parameter attribute. None means default attribute
:type param_attr: ParameterAttribute
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
......@@ -5399,7 +5433,7 @@ def crf_decoding_layer(input,
:type label: LayerOutput or None
:param param_attr: Parameter attribute. None means default attribute
:type param_attr: ParameterAttribute
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
......@@ -5458,9 +5492,9 @@ def nce_layer(input,
param_attr=[attr1, attr2], weight=layer3,
num_classes=3, neg_distribution=[0.1,0.3,0.6])
:param name: layer name
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input layers. It could be a LayerOutput of list/tuple of LayerOutput.
:param input: The input layers. It could be a LayerOutput or a list/tuple of LayerOutput.
:type input: LayerOutput|list|tuple|collections.Sequence
:param label: label layer
:type label: LayerOutput
......@@ -5478,8 +5512,11 @@ def nce_layer(input,
A uniform distribution will be used if not provided.
If not None, its length must be equal to num_classes.
:type neg_distribution: list|tuple|collections.Sequence|None
:param bias_attr: Bias parameter attribute. True if no bias.
:type bias_attr: ParameterAttribute|None|False
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: layer name.
......@@ -5594,7 +5631,7 @@ def rank_cost(left,
:param weight: The weight affects the cost, namely the scale of cost.
It is an optional argument.
:type weight: LayerOutput
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
......@@ -5648,7 +5685,7 @@ def lambda_cost(input,
:param score: The 2nd input. Score of each sample.
:type input: LayerOutput
:param NDCG_num: The size of NDCG (Normalized Discounted Cumulative Gain),
e.g., 5 for NDCG@5. It must be less than for equal to the
e.g., 5 for NDCG@5. It must be less than or equal to the
minimum size of lists.
:type NDCG_num: int
:param max_sort_size: The size of partial sorting in calculating gradient.
......@@ -5659,7 +5696,7 @@ def lambda_cost(input,
than the size of a list, the algorithm will sort the
entire list to get the gradient.
:type max_sort_size: int
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -5703,7 +5740,7 @@ def cross_entropy(input,
:type input: LayerOutput.
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param coeff: The cost is multiplied with coeff.
The coefficient affects the gradient in the backward.
......@@ -5751,7 +5788,7 @@ def cross_entropy_with_selfnorm(input,
:type input: LayerOutput.
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float.
......@@ -5791,7 +5828,7 @@ def sum_cost(input, name=None, layer_attr=None):
:param input: The first input layer.
:type input: LayerOutput.
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
......@@ -5836,7 +5873,7 @@ def huber_regression_cost(input,
:type input: LayerOutput.
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param delta: The difference between the observed and predicted values.
:type delta: float.
......@@ -5886,7 +5923,7 @@ def huber_classification_cost(input,
:type input: LayerOutput.
:param label: The input label.
:type input: LayerOutput.
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring.
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float.
......@@ -5929,7 +5966,7 @@ def multi_binary_label_cross_entropy(input,
:type input: LayerOutput
:param label: The input label.
:type input: LayerOutput
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
......@@ -6034,9 +6071,9 @@ def cross_entropy_over_beam(input, name=None):
])
:param input: input beams for this layer.
:param input: Input beams for this layer.
:type input: BeamInput
:param name: input beams for this layer.
:param name: The name of this layer.
:type name: basestring
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6097,7 +6134,7 @@ def smooth_l1_cost(input, label, name=None, coeff=1.0, layer_attr=None):
:type input: LayerOutput
:param label: The input label.
:type input: LayerOutput
:param name: The name of this layers. It is not necessary.
:param name: The name of this layer. It is optional.
:type name: None|basestring
:param coeff: The coefficient affects the gradient in the backward.
:type coeff: float
......@@ -6145,7 +6182,7 @@ def multiplex_layer(input, name=None, layer_attr=None):
:param input: Input layers.
:type input: list of LayerOutput
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
......@@ -6176,12 +6213,21 @@ def multiplex_layer(input, name=None, layer_attr=None):
@wrap_name_default("dropout")
def dropout_layer(input, dropout_rate, name=None):
"""
@TODO(yuyang18): Add comments.
:param name:
:param input:
:param dropout_rate:
:return:
The example usage is:
.. code-block:: python
dropout = dropout_layer(input=input_layer, dropout_rate=0.5)
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput
:param dropout_rate: The probability of dropout.
:type dropout_rate: float
:return: LayerOutput object.
:rtype: LayerOutput
"""
return addto_layer(
name=name,
......@@ -6204,7 +6250,7 @@ def row_conv_layer(input,
"""
The row convolution is called lookahead convolution. It is firstly
introduced in paper of `Deep Speech 2: End-toEnd Speech Recognition
introduced in paper of `Deep Speech 2: End-to-End Speech Recognition
in English and Mandarin <https://arxiv.org/pdf/1512.02595v1.pdf>`_ .
The bidirectional RNN that learns representation for a sequence by
......@@ -6212,9 +6258,9 @@ def row_conv_layer(input,
However, unlike unidirectional RNNs, bidirectional RNNs are challenging
to deploy in an online and low-latency setting. The lookahead convolution
incorporates information from future subsequences in a computationally
efficient manner to improve unidirectional recurrent neural networks.
efficient manner to improve unidirectional RNNs.
The connection of row convolution is different form the 1D sequence
The connection of row convolution is different from the 1D sequence
convolution. Assumed that, the future context-length is k, that is to say,
it can get the output at timestep t by using the input feature from the t-th
timestep to (t+k+1)-th timestep. Assumed that the hidden dim of input
......@@ -6243,7 +6289,7 @@ def row_conv_layer(input,
:param act: Activation Type. Default is linear activation.
:type act: BaseActivation
:param param_attr: The Parameter Attribute. If None, the parameter will be
initialized smartly. It's better set it by yourself.
initialized smartly. It's better to set it by yourself.
:type param_attr: ParameterAttribute
:param layer_attr: Extra Layer config.
:type layer_attr: ExtraLayerAttribute|None
......@@ -6290,7 +6336,7 @@ def prelu_layer(input,
prelu = prelu_layer(input=layers, partial_sum=1)
:param name: Name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput
......@@ -6343,7 +6389,7 @@ def gated_unit_layer(input,
The gated unit layer implements a simple gating mechanism over the input.
The input :math:`X` is first projected into a new space :math:`X'`, and
it is also used to produce a gate weight :math:`\sigma`. Element-wise
prodict between :match:`X'` and :math:`\sigma` is finally returned.
product between :math:`X'` and :math:`\sigma` is finally returned.
Reference:
Language Modeling with Gated Convolutional Networks
......@@ -6363,7 +6409,7 @@ def gated_unit_layer(input,
:type size: int
:param act: activation type of the projected input.
:type act: BaseActivation
:param name: name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param gate_attr: Attributes to tune the gate output, for example, error
clipping threshold, dropout and so on. See ExtraLayerAttribute for
......@@ -6439,10 +6485,10 @@ def switch_order_layer(input,
:param input: The input layer.
:type input: LayerOutput
:param name: Name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param reshape: reshape matrix by axes.
:type reshape: Dict
:param reshape_axis: Specify the axis of 'height'. Its value should be positive and less than 4.
:type reshape_axis: int
:return: LayerOutput object.
:rtype: LayerOutput
"""
......@@ -6492,7 +6538,7 @@ def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None):
:type partial_sum: int
:param shape: The shape to be cropped. Default is None.
:type shape: Sequence | None
:param name: Name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6538,7 +6584,7 @@ def sub_nested_seq_layer(input, selected_indices, name=None):
:type input: LayerOutput
:param selected_indices: a set of sequence indices in the nested sequence.
:type input: LayerOutput
:param name: name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6576,7 +6622,7 @@ def clip_layer(input, min, max, name=None):
clip = clip_layer(input=input_layer, min=-10, max=10)
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput.
......@@ -6621,7 +6667,7 @@ def seq_slice_layer(input, starts, ends, name=None):
seq_silce = seq_slice_layer(input=input_seq,
starts=start_pos, ends=end_pos)
:param name: name of this layer.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: input for this layer, it should be a sequence.
:type input: LayerOutput
......@@ -6675,12 +6721,12 @@ def kmax_seq_score_layer(input, name=None, beam_size=1):
kmax_indices = kmax_seq_score_layer(input=input_layer, beam_size=1)
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer. It stores scores over a sequence or a nested
sequence and its size must be 1.
:type input: LayerOutput.
:param beam_size: squence indices with top beam_size scores are returned.
:param beam_size: sequence indices with top beam_size scores are returned.
:type beam_size: double
:return: LayerOutput object.
:rtype: LayerOutput
......@@ -6733,7 +6779,7 @@ def img_conv3d_layer(input,
bias_attr=False,
act=ReluActivation())
:param name: Layer name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: Layer Input.
:type input: LayerOutput
......@@ -6752,7 +6798,7 @@ def img_conv3d_layer(input,
:type padding: int|tuple|list
:param bias_attr: Convolution bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|False
:type bias_attr: ParameterAttribute|None|Bool|Any
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:type num_channels: int
......@@ -6864,14 +6910,17 @@ def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None):
scale_shift = scale_shift_layer(input=input_layer, bias_attr=False)
:param name: The Layer Name.
:param name: The name of this layer. It is optional.
:type name: basestring
:param input: The input layer.
:type input: LayerOutput.
:param param_attr: The parameter attribute of scaling.
:type param_attr: ParameterAttribute
:param bias_attr: The parameter attribute of shifting.
:type bias_attr: ParameterAttribute
:param bias_attr: The Bias Attribute. If the parameter is set to
False or something not type of ParameterAttribute,
no bias is defined. If the parameter is set to
True, the bias is initialized to zero.
:type bias_attr: ParameterAttribute|None|Bool|Any
:return: LayerOutput object.
:rtype: LayerOutput
"""
......
......@@ -11,10 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
# from activations import *
from activations import LinearActivation, ReluActivation, SoftmaxActivation, \
IdentityActivation, TanhActivation, SequenceSoftmaxActivation
from attrs import ExtraAttr
......@@ -55,49 +53,49 @@ def sequence_conv_pool(input,
context_attr=None,
pool_attr=None):
"""
Text convolution pooling layers helper.
Text convolution pooling group.
Text input => Context Projection => FC Layer => Pooling => Output.
:param name: name of output layer(pooling layer name)
:param name: group name.
:type name: basestring
:param input: name of input layer
:param input: input layer.
:type input: LayerOutput
:param context_len: context projection length. See
context_projection's document.
:type context_len: int
:param hidden_size: FC Layer size.
:type hidden_size: int
:param context_start: context projection length. See
:param context_start: context start position. See
context_projection's context_start.
:type context_start: int or None
:type context_start: int|None
:param pool_type: pooling layer type. See pooling_layer's document.
:type pool_type: BasePoolingType.
:type pool_type: BasePoolingType
:param context_proj_layer_name: context projection layer name.
None if user doesn't care.
:type context_proj_layer_name: basestring
:param context_proj_param_attr: context projection parameter attribute.
None if user don't care.
:type context_proj_param_attr: ParameterAttribute or None.
:param context_proj_param_attr: padding parameter attribute of context projection layer.
If False, it means the padding is always zero.
:type context_proj_param_attr: ParameterAttribute|None
:param fc_layer_name: fc layer name. None if user doesn't care.
:type fc_layer_name: basestring
:param fc_param_attr: fc layer parameter attribute. None if user doesn't care.
:type fc_param_attr: ParameterAttribute or None
:type fc_param_attr: ParameterAttribute|None
:param fc_bias_attr: fc bias parameter attribute. False if no bias,
None if user doesn't care.
:type fc_bias_attr: ParameterAttribute or None
:param fc_act: fc layer activation type. None means tanh
:type fc_bias_attr: ParameterAttribute|False|None
:param fc_act: fc layer activation type. None means tanh.
:type fc_act: BaseActivation
:param pool_bias_attr: pooling layer bias attr. None if don't care.
False if no bias.
:type pool_bias_attr: ParameterAttribute or None.
:param pool_bias_attr: pooling layer bias attr. False if no bias.
None if user doesn't care.
:type pool_bias_attr: ParameterAttribute|False|None
:param fc_attr: fc layer extra attribute.
:type fc_attr: ExtraLayerAttribute
:param context_attr: context projection layer extra attribute.
:type context_attr: ExtraLayerAttribute
:param pool_attr: pooling layer extra attribute.
:type pool_attr: ExtraLayerAttribute
:return: output layer name.
:return: layer's output.
:rtype: LayerOutput
"""
# Set Default Value to param
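A hedged usage sketch of the pipeline documented above (Text input => Context Projection => FC Layer => Pooling); the embedding layer `emb` and the hyper-parameter values are assumptions:

.. code-block:: python

    # `emb` is an assumed embedding LayerOutput over a word sequence.
    text_feature = sequence_conv_pool(input=emb,
                                      context_len=3,    # context projection window
                                      hidden_size=512)  # FC layer size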
......@@ -163,45 +161,45 @@ def simple_img_conv_pool(input,
"""
Simple image convolution and pooling group.
Input => conv => pooling
Img input => Conv => Pooling => Output.
:param name: group name
:param name: group name.
:type name: basestring
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param filter_size: see img_conv_layer for details
:param filter_size: see img_conv_layer for details.
:type filter_size: int
:param num_filters: see img_conv_layer for details
:param num_filters: see img_conv_layer for details.
:type num_filters: int
:param pool_size: see img_pool_layer for details
:param pool_size: see img_pool_layer for details.
:type pool_size: int
:param pool_type: see img_pool_layer for details
:param pool_type: see img_pool_layer for details.
:type pool_type: BasePoolingType
:param act: see img_conv_layer for details
:param act: see img_conv_layer for details.
:type act: BaseActivation
:param groups: see img_conv_layer for details
:param groups: see img_conv_layer for details.
:type groups: int
:param conv_stride: see img_conv_layer for details
:param conv_stride: see img_conv_layer for details.
:type conv_stride: int
:param conv_padding: see img_conv_layer for details
:param conv_padding: see img_conv_layer for details.
:type conv_padding: int
:param bias_attr: see img_conv_layer for details
:param bias_attr: see img_conv_layer for details.
:type bias_attr: ParameterAttribute
:param num_channel: see img_conv_layer for details
:param num_channel: see img_conv_layer for details.
:type num_channel: int
:param param_attr: see img_conv_layer for details
:param param_attr: see img_conv_layer for details.
:type param_attr: ParameterAttribute
:param shared_bias: see img_conv_layer for details
:param shared_bias: see img_conv_layer for details.
:type shared_bias: bool
:param conv_layer_attr: see img_conv_layer for details
:param conv_layer_attr: see img_conv_layer for details.
:type conv_layer_attr: ExtraLayerAttribute
:param pool_stride: see img_pool_layer for details
:param pool_stride: see img_pool_layer for details.
:type pool_stride: int
:param pool_padding: see img_pool_layer for details
:param pool_padding: see img_pool_layer for details.
:type pool_padding: int
:param pool_layer_attr: see img_pool_layer for details
:param pool_layer_attr: see img_pool_layer for details.
:type pool_layer_attr: ExtraLayerAttribute
:return: Layer's output
:return: layer's output
:rtype: LayerOutput
"""
_conv_ = img_conv_layer(
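For instance, a LeNet-style first stage could be sketched as follows (the
`img` input layer and the parameter values are assumed):

.. code-block:: python

    conv_pool = simple_img_conv_pool(input=img,
                                     filter_size=5,
                                     num_filters=20,
                                     num_channel=1,
                                     pool_size=2,
                                     pool_stride=2,
                                     act=ReluActivation())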
......@@ -252,48 +250,52 @@ def img_conv_bn_pool(input,
pool_layer_attr=None):
"""
Convolution, batch normalization, pooling group.
Img input => Conv => BN => Pooling => Output.
:param name: group name
:param name: group name.
:type name: basestring
:param input: layer's input
:type input: LayerOutput
:param filter_size: see img_conv_layer's document
:param input: input layer.
:type input: LayerOutput
:param filter_size: see img_conv_layer for details.
:type filter_size: int
:param num_filters: see img_conv_layer's document
:param num_filters: see img_conv_layer for details.
:type num_filters: int
:param pool_size: see img_pool_layer's document.
:param pool_size: see img_pool_layer for details.
:type pool_size: int
:param pool_type: see img_pool_layer's document.
:param pool_type: see img_pool_layer for details.
:type pool_type: BasePoolingType
:param act: see batch_norm_layer's document.
:param act: see batch_norm_layer for details.
:type act: BaseActivation
:param groups: see img_conv_layer's document
:param groups: see img_conv_layer for details.
:type groups: int
:param conv_stride: see img_conv_layer's document.
:param conv_stride: see img_conv_layer for details.
:type conv_stride: int
:param conv_padding: see img_conv_layer's document.
:param conv_padding: see img_conv_layer for details.
:type conv_padding: int
:param conv_bias_attr: see img_conv_layer's document.
:param conv_bias_attr: see img_conv_layer for details.
:type conv_bias_attr: ParameterAttribute
:param num_channel: see img_conv_layer's document.
:param num_channel: see img_conv_layer for details.
:type num_channel: int
:param conv_param_attr: see img_conv_layer's document.
:param conv_param_attr: see img_conv_layer for details.
:type conv_param_attr: ParameterAttribute
:param shared_bias: see img_conv_layer's document.
:param shared_bias: see img_conv_layer for details.
:type shared_bias: bool
:param conv_layer_attr: see img_conv_layer's document.
:param conv_layer_attr: see img_conv_layer for details.
:type conv_layer_attr: ExtraLayerAttribute
:param bn_param_attr: see batch_norm_layer's document.
:type bn_param_attr: ParameterAttribute.
:param bn_bias_attr: see batch_norm_layer's document.
:param bn_layer_attr: ParameterAttribute.
:param pool_stride: see img_pool_layer's document.
:param bn_param_attr: see batch_norm_layer for details.
:type bn_param_attr: ParameterAttribute
:param bn_bias_attr: see batch_norm_layer for details.
:type bn_bias_attr: ParameterAttribute
:param bn_layer_attr: see batch_norm_layer for details.
:type bn_layer_attr: ExtraLayerAttribute
:param pool_stride: see img_pool_layer for details.
:type pool_stride: int
:param pool_padding: see img_pool_layer's document.
:param pool_padding: see img_pool_layer for details.
:type pool_padding: int
:param pool_layer_attr: see img_pool_layer's document.
:param pool_layer_attr: see img_pool_layer for details.
:type pool_layer_attr: ExtraLayerAttribute
:return: Layer groups output
:return: layer's output
:rtype: LayerOutput
"""
__conv__ = img_conv_layer(
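A hedged usage sketch mirroring the parameters documented above (the `img`
input layer is assumed):

.. code-block:: python

    block = img_conv_bn_pool(input=img,
                             filter_size=3,
                             num_filters=64,
                             pool_size=2,
                             pool_stride=2,
                             act=ReluActivation())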
......@@ -348,10 +350,10 @@ def img_conv_group(input,
:param conv_batchnorm_drop_rate: if conv_with_batchnorm[i] is true,
conv_batchnorm_drop_rate[i] represents the drop rate of each batch norm.
:type conv_batchnorm_drop_rate: list
:param input: layer's input.
:param input: input layer.
:type input: LayerOutput
:param conv_num_filter: output channels num.
:type conv_num_filter: int
:param conv_num_filter: list of output channels num.
:type conv_num_filter: list|tuple
:param pool_size: pooling filter size.
:type pool_size: int
:param num_channels: input channels num.
......@@ -362,18 +364,18 @@ def img_conv_group(input,
:type conv_filter_size: int
:param conv_act: activation function after convolution.
:type conv_act: BaseActivation
:param conv_with_batchnorm: conv_with_batchnorm[i] represents
if there is a batch normalization after each convolution.
:param conv_with_batchnorm: if conv_with_batchnorm[i] is true,
there is a batch normalization operation after each convolution.
:type conv_with_batchnorm: list
:param pool_stride: pooling stride size.
:type pool_stride: int
:param pool_type: pooling type.
:type pool_type: BasePoolingType
:param param_attr: Convolution param attribute.
None means default attribute.
:param param_attr: param attribute of convolution layer,
None means default attribute.
:type param_attr: ParameterAttribute
:return: Layer's output
:type: LayerOutput
:return: layer's output
:rtype: LayerOutput
"""
tmp = input
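A sketch of a two-convolution VGG-style block, illustrating the list-typed
arguments (the `img` input layer is assumed, and MaxPooling is assumed to be
imported from the poolings module):

.. code-block:: python

    block = img_conv_group(input=img,
                           num_channels=3,
                           conv_num_filter=[64, 64],            # one entry per conv layer
                           conv_filter_size=3,
                           conv_act=ReluActivation(),
                           conv_with_batchnorm=[True, True],
                           conv_batchnorm_drop_rate=[0.0, 0.0],
                           pool_size=2,
                           pool_stride=2,
                           pool_type=MaxPooling())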
......@@ -466,12 +468,14 @@ def vgg_16_network(input_image, num_channels, num_classes=1000):
"""
Same model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8
:param num_classes:
:param input_image:
:param num_classes: number of classes.
:type num_classes: int
:param input_image: input layer.
:type input_image: LayerOutput
:param num_channels:
:param num_channels: input channels num.
:type num_channels: int
:return:
:return: layer's output
:rtype: LayerOutput
"""
tmp = img_conv_group(
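Usage is a single call; a sketch with an assumed data layer definition:

.. code-block:: python

    image = data_layer(name="image", size=3 * 224 * 224)  # assumed input definition
    predict = vgg_16_network(input_image=image,
                             num_channels=3,
                             num_classes=1000)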
......@@ -560,8 +564,8 @@ def simple_lstm(input,
"""
Simple LSTM Cell.
It just combine a mixed layer with fully_matrix_projection and a lstmemory
layer. The simple lstm cell was implemented as follow equations.
It just combines a mixed layer with fully_matrix_projection and a lstmemory
layer. The simple lstm cell was implemented with the following equations.
.. math::
......@@ -575,37 +579,37 @@ def simple_lstm(input,
h_t & = o_t tanh(c_t)
Please refer **Generating Sequences With Recurrent Neural Networks** if you
want to know what lstm is. Link_ is here.
Please refer to **Generating Sequences With Recurrent Neural Networks** for more
details about lstm. Link_ is here.
.. _Link: http://arxiv.org/abs/1308.0850
:param name: lstm layer name.
:type name: basestring
:param input: input layer name.
:param input: layer's input.
:type input: LayerOutput
:param size: lstm layer size.
:type size: int
:param reverse: whether to process the input data in a reverse order
:param reverse: process the input in a reverse order or not.
:type reverse: bool
:param mat_param_attr: mixed layer's matrix projection parameter attribute.
:param mat_param_attr: parameter attribute of matrix projection in mixed layer.
:type mat_param_attr: ParameterAttribute
:param bias_param_attr: bias parameter attribute. False means no bias, None
means default bias.
:type bias_param_attr: ParameterAttribute|False
:param inner_param_attr: lstm cell parameter attribute.
:param inner_param_attr: parameter attribute of lstm cell.
:type inner_param_attr: ParameterAttribute
:param act: lstm final activiation type
:param act: last activation type of lstm.
:type act: BaseActivation
:param gate_act: lstm gate activiation type
:param gate_act: gate activation type of lstm.
:type gate_act: BaseActivation
:param state_act: lstm state activiation type.
:param state_act: state activation type of lstm.
:type state_act: BaseActivation
:param mixed_layer_attr: mixed layer's extra attribute.
:param mixed_layer_attr: extra attribute of mixed layer.
:type mixed_layer_attr: ExtraLayerAttribute
:param lstm_cell_attr: lstm layer's extra attribute.
:param lstm_cell_attr: extra attribute of lstm.
:type lstm_cell_attr: ExtraLayerAttribute
:return: lstm layer name.
:return: layer's output.
:rtype: LayerOutput
"""
fc_name = 'lstm_transform_%s' % name
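A minimal sketch (assuming `emb` is a LayerOutput):

.. code-block:: python

    lstm = simple_lstm(input=emb, size=256, reverse=False)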
......@@ -643,9 +647,9 @@ def lstmemory_unit(input,
lstm_bias_attr=None,
lstm_layer_attr=None):
"""
Define calculations that a LSTM unit performs during a single time step.
This function itself is not a recurrent layer, so it can not be
directly used to process sequence inputs. This function is always used in
lstmemory_unit defines the calculation process of an LSTM unit during a
single time step. This function is not a recurrent layer, so it can not be
directly used to process sequence input. This function is always used in
recurrent_group (see layers.py for more details) to implement an attention
mechanism.
......@@ -676,7 +680,7 @@ def lstmemory_unit(input,
state_act=TanhActivation())
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param out_memory: output of previous time step
:type out_memory: LayerOutput | None
......@@ -684,15 +688,15 @@ def lstmemory_unit(input,
:type name: basestring
:param size: lstmemory unit size.
:type size: int
:param param_attr: Parameter config, None if use default.
:param param_attr: parameter attribute, None means default attribute.
:type param_attr: ParameterAttribute
:param act: lstm final activiation type
:param act: last activation type of lstm.
:type act: BaseActivation
:param gate_act: lstm gate activiation type
:param gate_act: gate activation type of lstm.
:type gate_act: BaseActivation
:param state_act: lstm state activiation type.
:param state_act: state activation type of lstm.
:type state_act: BaseActivation
:param input_proj_bias_attr: bias attribute for input-to-hidden projection.
:param input_proj_bias_attr: bias attribute for input to hidden projection.
False means no bias, None means default bias.
:type input_proj_bias_attr: ParameterAttribute|False|None
:param input_proj_layer_attr: extra layer attribute for input to hidden
......@@ -700,8 +704,8 @@ def lstmemory_unit(input,
:type input_proj_layer_attr: ExtraLayerAttribute
:param lstm_bias_attr: bias parameter attribute of lstm layer.
False means no bias, None means default bias.
:type lstm_bias_attr: ParameterAttribute|False
:param lstm_layer_attr: lstm layer's extra attribute.
:type lstm_bias_attr: ParameterAttribute|False|None
:param lstm_layer_attr: extra attribute of lstm layer.
:type lstm_layer_attr: ExtraLayerAttribute
:return: lstmemory unit name.
:rtype: LayerOutput
......@@ -758,9 +762,9 @@ def lstmemory_group(input,
lstm_group is a recurrent_group version of Long Short Term Memory. It
does exactly the same calculation as the lstmemory layer (see lstmemory in
layers.py for the maths) does. A promising benefit is that LSTM memory
cell states, or hidden states in every time step are accessible to the
cell states (or hidden states) in every time step are accessible to the
user. This is especially useful in attention models. If you do not need to
access the internal states of the lstm, but merely use its outputs,
access the internal states of the lstm and merely use its outputs,
it is recommended to use the lstmemory, which is relatively faster than
lstmemory_group.
......@@ -781,28 +785,28 @@ def lstmemory_group(input,
gate_act=SigmoidActivation(),
state_act=TanhActivation())
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param size: lstmemory group size.
:type size: int
:param name: name of the lstmemory group.
:param name: name of lstmemory group.
:type name: basestring
:param out_memory: output of previous time step
:param out_memory: output of previous time step.
:type out_memory: LayerOutput | None
:param reverse: is lstm reversed
:param reverse: process the input in a reverse order or not.
:type reverse: bool
:param param_attr: Parameter config, None if use default.
:param param_attr: parameter attribute, None means default attribute.
:type param_attr: ParameterAttribute
:param act: lstm final activiation type
:param act: last activation type of lstm.
:type act: BaseActivation
:param gate_act: lstm gate activiation type
:param gate_act: gate activation type of lstm.
:type gate_act: BaseActivation
:param state_act: lstm state activiation type.
:param state_act: state activation type of lstm.
:type state_act: BaseActivation
:param lstm_bias_attr: bias parameter attribute of lstm layer.
False means no bias, None means default bias.
:type lstm_bias_attr: ParameterAttribute|False
:param input_proj_bias_attr: bias attribute for input-to-hidden projection.
:type lstm_bias_attr: ParameterAttribute|False|None
:param input_proj_bias_attr: bias attribute for input to hidden projection.
False means no bias, None means default bias.
:type input_proj_bias_attr: ParameterAttribute|False|None
:param input_proj_layer_attr: extra layer attribute for input to hidden
......@@ -848,15 +852,15 @@ def gru_unit(input,
gru_layer_attr=None,
naive=False):
"""
Define calculations that a gated recurrent unit performs in a single time
step. This function itself is not a recurrent layer, so it can not be
directly used to process sequence inputs. This function is always used in
gru_unit defines the calculation process of a gated recurrent unit during a single
time step. This function is not a recurrent layer, so it can not be
directly used to process sequence input. This function is always used in
the recurrent_group (see layers.py for more details) to implement an attention
mechanism.
Please see grumemory in layers.py for the details about the maths.
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param memory_boot: the initialization state of the GRU cell.
:type memory_boot: LayerOutput | None
......@@ -864,12 +868,12 @@ def gru_unit(input,
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param act: type of the activation
:param act: activation type of gru
:type act: BaseActivation
:param gate_act: type of the gate activation
:param gate_act: gate activation type of gru
:type gate_act: BaseActivation
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:param gru_layer_attr: Extra attribute of the gru layer.
:type gru_layer_attr: ExtraLayerAttribute
:return: the gru output layer.
:rtype: LayerOutput
"""
......@@ -915,7 +919,7 @@ def gru_group(input,
does exactly the same calculation as the grumemory layer does. A promising
benefit is that gru hidden states are accessible to the user. This is
especially useful in attention models. If you do not need to access
any internal state, but merely use the outputs of a GRU, it is recommended
any internal state and merely use the outputs of a GRU, it is recommended
to use the grumemory, which is relatively faster.
Please see grumemory in layers.py for more detail about the maths.
......@@ -924,12 +928,12 @@ def gru_group(input,
.. code-block:: python
gru = gur_group(input=[layer1],
gru = gru_group(input=[layer1],
size=256,
act=TanhActivation(),
gate_act=SigmoidActivation())
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param memory_boot: the initialization state of the GRU cell.
:type memory_boot: LayerOutput | None
......@@ -937,16 +941,17 @@ def gru_group(input,
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:param reverse: process the input in a reverse order or not.
:type reverse: bool
:param act: type of the activiation
:param act: activation type of gru
:type act: BaseActivation
:param gate_act: type of the gate activiation
:param gate_act: gate activation type of gru
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:param gru_bias_attr: bias parameter attribute of gru layer,
False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False|None
:param gru_layer_attr: Extra attribute of the gru layer.
:type gru_layer_attr: ExtraLayerAttribute
:return: the gru group.
:rtype: LayerOutput
"""
......@@ -986,11 +991,11 @@ def simple_gru(input,
gru_layer_attr=None,
naive=False):
"""
You maybe see gru_step_layer, grumemory in layers.py, gru_unit, gru_group,
You may see gru_step_layer, grumemory in layers.py, gru_unit, gru_group,
simple_gru in network.py. The reason why there are so many interfaces is
that we have two ways to implement recurrent neural network. One way is to
use one complete layer to implement rnn (including simple rnn, gru and lstm)
with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But,
with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But
the multiplication operation :math:`W x_t` is not computed in these layers.
See details in their interfaces in layers.py.
The other implementation is to use a recurrent group which can ensemble a
......@@ -1018,22 +1023,23 @@ def simple_gru(input,
gru = simple_gru(input=[layer1], size=256)
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:param reverse: process the input in a reverse order or not.
:type reverse: bool
:param act: type of the activiation
:param act: activation type of gru
:type act: BaseActivation
:param gate_act: type of the gate activiation
:param gate_act: gate activation type of gru
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:param gru_bias_attr: bias parameter attribute of gru layer,
False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False|None
:param gru_layer_attr: Extra attribute of the gru layer.
:type gru_layer_attr: ExtraLayerAttribute
:return: the gru group.
:rtype: LayerOutput
"""
......@@ -1071,8 +1077,8 @@ def simple_gru2(input,
mixed_layer_attr=None,
gru_cell_attr=None):
"""
simple_gru2 is the same with simple_gru, but using grumemory instead
Please see grumemory in layers.py for more detail about the maths.
simple_gru2 is the same as simple_gru, but uses grumemory instead.
Please refer to grumemory in layers.py for more detail about the math.
simple_gru2 is faster than simple_gru.
The example usage is:
......@@ -1081,22 +1087,23 @@ def simple_gru2(input,
gru = simple_gru2(input=[layer1], size=256)
:param input: input layer name.
:param input: input layer.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:param reverse: process the input in a reverse order or not.
:type reverse: bool
:param act: type of the activiation
:param act: activation type of gru
:type act: BaseActivation
:param gate_act: type of the gate activiation
:param gate_act: gate activation type of gru
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:param gru_bias_attr: bias parameter attribute of gru layer,
False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False|None
:param gru_layer_attr: Extra attribute of the gru layer.
:type gru_layer_attr: ExtraLayerAttribute
:return: the gru group.
:rtype: LayerOutput
"""
......@@ -1145,7 +1152,7 @@ def bidirectional_gru(input,
concat_act=None):
"""
A bidirectional_gru is a recurrent unit that iterates over the input
sequence both in forward and bardward orders, and then concatenate two
sequence both in forward and backward orders, and then concatenates two
outputs to form a final output. However, concatenation of two outputs
is not the only way to form the final output, you can also, for example,
just add them together.
......@@ -1162,11 +1169,10 @@ def bidirectional_gru(input,
:type input: LayerOutput
:param size: gru layer size.
:type size: int
:param return_seq: If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
:param return_seq: If set False, the outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences in forward
and backward directions are concatenated and returned.
:type return_seq: bool
:return: LayerOutput object.
:rtype: LayerOutput
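A sketch of the two return modes (the `emb` input layer is assumed):

.. code-block:: python

    last_step = bidirectional_gru(input=emb, size=256)   # concatenated last time step
    full_seq = bidirectional_gru(input=emb, size=256,
                                 return_seq=True)        # concatenated whole sequences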
......@@ -1230,8 +1236,8 @@ def bidirectional_lstm(input,
concat_act=None):
"""
A bidirectional_lstm is a recurrent unit that iterates over the input
sequence both in forward and bardward orders, and then concatenate two
outputs form a final output. However, concatenation of two outputs
sequence both in forward and backward orders, and then concatenates two
outputs to form a final output. However, concatenation of two outputs
is not the only way to form the final output, you can also, for example,
just add them together.
......@@ -1252,13 +1258,12 @@ def bidirectional_lstm(input,
:type input: LayerOutput
:param size: lstm layer size.
:type size: int
:param return_seq: If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
:param return_seq: If set False, the outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences in forward
and backward directions are concatenated and returned.
:type return_seq: bool
:return: LayerOutput object accroding to the return_seq.
:return: LayerOutput object.
:rtype: LayerOutput
"""
args = locals()
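For example, the sequence form can feed a pooling layer (a sketch; the `emb`
layer and the downstream pooling choice are assumed):

.. code-block:: python

    bi_lstm = bidirectional_lstm(input=emb, size=128, return_seq=True)
    pooled = pooling_layer(input=bi_lstm, pooling_type=MaxPooling())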
......@@ -1303,7 +1308,7 @@ def simple_attention(encoded_sequence,
weight_act=None,
name=None):
"""
Calculate and then return a context vector by attention machanism.
Calculate and return a context vector with an attention mechanism.
The size of the context vector equals the size of the encoded_sequence.
.. math::
......@@ -1336,10 +1341,10 @@ def simple_attention(encoded_sequence,
:param name: name of the attention model.
:type name: basestring
:param softmax_param_attr: parameter attribute of sequence softmax
that is used to produce attention weight
that is used to produce attention weight.
:type softmax_param_attr: ParameterAttribute
:param weight_act: activation of the attention model
:type weight_act: Activation
:param weight_act: activation of the attention model.
:type weight_act: BaseActivation
:param encoded_sequence: output of the encoder
:type encoded_sequence: LayerOutput
:param encoded_proj: attention weight is computed by a feed forward neural
......@@ -1411,7 +1416,7 @@ def inputs(layers, *args):
def outputs(layers, *args):
"""
Declare the outputs of network. If user have not defined the inputs of
Declare the outputs of a network. If the user has not defined the inputs of
the network, this method will calculate the input order by DFS traversal.
:param layers: Output layers.
......
......@@ -28,10 +28,10 @@ def create_op(scope, op_type, inputs, outputs, attrs):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_in = outputs[out_name]
for sub_in_name, _ in sub_in:
var = scope.new_var(sub_in_name)
kwargs[out_name].append(sub_in_name)
sub_out = outputs[out_name]
for sub_out_name, _ in sub_out:
var = scope.new_var(sub_out_name)
kwargs[out_name].append(sub_out_name)
else:
var = scope.new_var(out_name)
kwargs[out_name].append(out_name)
......@@ -39,6 +39,7 @@ def create_op(scope, op_type, inputs, outputs, attrs):
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
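The branches above encode two dict conventions that the tests below rely on:
a duplicable slot maps to a list of (sub_name, value) pairs, while a plain
slot maps to a single value. A sketch with illustrative names and shapes:

.. code-block:: python

    import numpy as np

    inputs = {
        "X": [("x0", np.ones((2, 3), dtype="float32")),  # duplicable slot: list of pairs
              ("x1", np.ones((2, 3), dtype="float32"))],
        "b": np.zeros(3, dtype="float32"),               # plain slot: single array
    }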
......@@ -47,17 +48,24 @@ def set_input(scope, op, inputs, place):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for sub_in_name, sub_in_array in sub_in:
for sub_in_name, sub_in_val in sub_in:
var = scope.find_var(sub_in_name)
tensor = var.get_tensor()
sub_in_array = sub_in_val[0] \
if isinstance(sub_in_val, tuple) else sub_in_val
tensor.set_dims(sub_in_array.shape)
tensor.set(sub_in_array, place)
if isinstance(sub_in_val, tuple):
tensor.set_lod(sub_in_val[1])
else:
var = scope.find_var(in_name)
tensor = var.get_tensor()
arr = inputs[in_name]
tensor.set_dims(arr.shape)
tensor.set(arr, place)
in_val = inputs[in_name]
in_array = in_val[0] if isinstance(in_val, tuple) else in_val
tensor.set_dims(in_array.shape)
tensor.set(in_array, place)
if isinstance(in_val, tuple):
tensor.set_lod(in_val[1])
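The tuple branch lets a test attach LoD (sequence boundary) information to an
input, as the sequence_avg_pool tests below do:

.. code-block:: python

    x = np.random.uniform(0.1, 1, [11, 23]).astype("float32")
    lod = [[0, 4, 5, 8, 11]]   # 4 sequences of lengths 4, 1, 3, 3
    inputs = {"X": (x, lod)}   # set_input will call tensor.set_lod(lod)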
def set_output_grad(scope, op, outputs, place):
......@@ -172,8 +180,9 @@ class OpTest(unittest.TestCase):
def check_output_with_place(self, place, atol):
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_outputs = self.outputs if hasattr(self, "outputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
op_attrs)
if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
return
......@@ -183,12 +192,18 @@ class OpTest(unittest.TestCase):
self.op.run(self.scope, ctx)
for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
if out_name not in self.outputs:
continue
if out_dup:
sub_out = self.outputs[out_name]
for sub_out_name in sub_out:
if not isinstance(sub_out, list):
raise AssertionError("sub_out type %s is not list",
type(sub_out))
for sub_out_name, expect in sub_out:
actual = np.array(
self.scope.find_var(sub_out_name).get_tensor())
expect = sub_out[sub_out_name]
self.assertTrue(
np.allclose(
actual, expect, atol=atol),
......@@ -236,8 +251,9 @@ class OpTest(unittest.TestCase):
max_relative_error=0.005):
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_outputs = self.outputs if hasattr(self, "outputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
op_attrs)
if no_grad_set is None:
no_grad_set = set()
......
......@@ -6,16 +6,17 @@ from op_test import OpTest
class TestAccuracyOp(OpTest):
def setUp(self):
self.op_type = "accuracy"
infer = np.random.randint(0, 2, (32, 1)).astype("int")
label = np.random.randint(0, 2, (32, )).astype("int")
n = 8192
infer = np.random.randint(0, 2, (n, 1)).astype("int")
label = np.random.randint(0, 2, (n, )).astype("int")
self.inputs = {'Inference': infer, "Label": label}
num_correct = 0
for rowid in xrange(32):
for rowid in xrange(n):
for ele in infer[rowid]:
if ele == label[rowid]:
num_correct += 1
break
self.outputs = {'Accuracy': [num_correct / 32.0]}
self.outputs = {'Accuracy': [num_correct / float(n)]}
def test_check_output(self):
self.check_output()
......
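The row loop computes the same statistic as a one-line vectorized check; a
sketch using the arrays above:

.. code-block:: python

    acc = float((infer[:, 0] == label).sum()) / n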
import unittest
import numpy
import numpy as np
from op_test import OpTest
class TestCrossEntropy(OpTest):
class TestCrossEntropyOp1(OpTest):
"""Test standard cross-entropy, with index representation of labels.
"""
def setUp(self):
self.op_type = "onehot_cross_entropy"
self.op_type = "cross_entropy"
batch_size = 30
class_num = 10
X = numpy.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float32")
labels = numpy.random.randint(0, class_num, batch_size, dtype="int32")
cross_entropy = numpy.asmatrix(
[[-numpy.log(X[i][labels[i]])] for i in range(X.shape[0])],
X = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float32")
label = np.random.randint(0, class_num, (batch_size, 1), dtype="int32")
cross_entropy = np.asmatrix(
[[-np.log(X[i][label[i][0]])] for i in range(X.shape[0])],
dtype="float32")
self.inputs = {"X": X, "label": labels}
self.inputs = {"X": X, "Label": label}
self.outputs = {"Y": cross_entropy}
self.attrs = {'soft_label': 0}
def test_check_output(self):
self.check_output()
......@@ -26,5 +28,62 @@ class TestCrossEntropy(OpTest):
self.check_grad(["X"], "Y")
class TestCrossEntropyOp2(OpTest):
"""Test soft-label cross-entropy, with vecterized soft labels.
"""
def setUp(self):
self.op_type = "cross_entropy"
batch_size = 10
class_num = 5
X = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float32")
label = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float32")
label /= label.sum(axis=1, keepdims=True)
cross_entropy = (-label * np.log(X)).sum(
axis=1, keepdims=True).astype("float32")
self.inputs = {'X': X, 'Label': label}
self.outputs = {'Y': cross_entropy}
self.attrs = {'soft_label': 1}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Y')
class TestCrossEntropyOp3(OpTest):
"""Test one-hot cross-entropy, with vecterized one-hot representation of
labels.
"""
def setUp(self):
self.op_type = "cross_entropy"
batch_size = 30
class_num = 10
X = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float32")
label_index = np.random.randint(
0, class_num, (batch_size), dtype="int32")
label = np.zeros(X.shape)
label[np.arange(batch_size), label_index] = 1
cross_entropy = np.asmatrix(
[[-np.log(X[i][label_index[i]])] for i in range(X.shape[0])],
dtype="float32")
cross_entropy2 = (-label * np.log(X)).sum(
axis=1, keepdims=True).astype("float32")
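# Note: for a one-hot label matrix the soft-label formula reduces to the
# indexed form, so cross_entropy2 should equal cross_entropy element-wise.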
self.inputs = {'X': X, 'Label': label}
self.outputs = {'Y': cross_entropy}
self.attrs = {'soft_label': 1}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Y')
if __name__ == "__main__":
unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestDropoutOp(OpTest):
def setUp(self):
self.op_type = "dropout"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'is_training': 1}
self.outputs = {'Out': self.inputs['X'], 'Mask': np.ones((32, 64))}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', max_relative_error=0.05)
class TestDropoutOp2(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 1.0, 'is_training': 1}
self.outputs = {'Out': np.zeros((32, 64)), 'Mask': np.zeros((32, 64))}
class TestDropoutOp3(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'is_training': 1}
self.outputs = {'Out': self.inputs['X'], 'Mask': np.ones((32, 64, 2))}
class TestDropoutOp4(OpTest):
def setUp(self):
self.op_type = "dropout"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.35, 'is_training': 0}
self.outputs = {'Out': self.inputs['X'] * self.attrs['dropout_prob']}
def test_check_output(self):
self.check_output()
class TestDropoutOp5(OpTest):
def setUp(self):
self.op_type = "dropout"
self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
self.attrs = {'dropout_prob': 0.75, 'is_training': 0}
self.outputs = {'Out': self.inputs['X'] * self.attrs['dropout_prob']}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
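Taken together, the five cases pin down the contract these tests expect (a
reading of the expectations above, not a statement about the kernel
internals):

.. code-block:: python

    # is_training == 1: Out = X * Mask with Mask in {0, 1};
    #     dropout_prob == 0.0 keeps every element, 1.0 zeroes everything.
    # is_training == 0: Out = X * dropout_prob, and no Mask is checked.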
import unittest
import numpy as np
from op_test import OpTest
class TestFCOp1(OpTest):
def setUp(self):
x0 = np.random.random((16, 32)).astype("float32")
w0 = np.random.random((32, 10)).astype("float32")
mul_out0 = np.dot(x0, w0)
identity_out = mul_out0
self.op_type = "fc"
self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)]}
self.outputs = {"MulOut": [("MulOut0", mul_out0)], "Out": identity_out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X0", "W0"], "Out", max_relative_error=0.01)
class TestFCOp2(OpTest):
def setUp(self):
x0 = np.random.random((16, 4, 8)).astype("float32")
x1 = np.random.random((4, 4, 32)).astype("float32")
w0 = np.random.random((32, 10)).astype("float32")
w1 = np.random.random((32, 10)).astype("float32")
b = np.random.random(10).astype("float32")
mul_out0 = np.dot(x0.reshape(16, 4 * 8), w0)
mul_out1 = np.dot(x1.reshape(4 * 4, 32), w1)
sum_out = mul_out0 + mul_out1
add_out = np.add(sum_out, b)
sigmoid_out = 1 / (1 + np.exp(-add_out))
self.op_type = "fc"
self.inputs = {
"X": [("X0", x0), ("X1", x1)],
"W": [("W0", w0), ("W1", w1)],
"B": b
}
self.attrs = {"xNumColDims": [1, 2], "activation": "sigmoid"}
self.outputs = {
"MulOut": [("MulOut0", mul_out0), ("MulOut1", mul_out1)],
"SumOut": sum_out,
"AddOut": add_out,
"Out": sigmoid_out
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(
["X0", "X1", "W0", "W1", "B"], "Out", max_relative_error=0.01)
if __name__ == '__main__':
unittest.main()
......@@ -4,7 +4,7 @@ from paddle.v2.framework.op import Operator
import numpy
class GaussianRandomTest(unittest.TestCase):
class TestGaussianRandomOp(unittest.TestCase):
def test_cpu(self):
self.gaussian_random_test(place=core.CPUPlace())
......
import unittest
import numpy as np
from op_test import OpTest
class TestIdentityOp(OpTest):
def setUp(self):
self.op_type = "identity"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.outputs = {'Y': self.inputs['X']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Y')
if __name__ == "__main__":
unittest.main()
......@@ -3,7 +3,7 @@ import numpy as np
from op_test import OpTest
class MinusOpTest(OpTest):
class TestMinusOp(OpTest):
def setUp(self):
self.op_type = "minus"
self.inputs = {
......
......@@ -128,7 +128,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
def cross_entropy_layer(net, input, label):
cost_name = "cross_entropy_%d" % uniq_id()
cross_entropy_op = Operator(
"onehot_cross_entropy", X=input, label=label, Y=cost_name)
"cross_entropy", X=input, Label=label, Y=cost_name)
net.append_op(cross_entropy_op)
scope.new_var(cost_name)
net.infer_shape(scope)
......@@ -181,7 +181,7 @@ def error_rate(predict, label):
images = data_layer(name="pixel", dims=[BATCH_SIZE, 784])
labels = data_layer(name="label", dims=[BATCH_SIZE])
labels = data_layer(name="label", dims=[BATCH_SIZE, 1])
fc1 = fc_layer(net=forward_net, input=images, size=100, act="sigmoid")
fc2 = fc_layer(net=forward_net, input=fc1, size=100, act="sigmoid")
predict = fc_layer(net=forward_net, input=fc2, size=10, act="softmax")
......@@ -215,6 +215,7 @@ def test(cost_name):
for data in test_reader():
image_data = numpy.array(map(lambda x: x[0], data)).astype("float32")
label_data = numpy.array(map(lambda x: x[1], data)).astype("int32")
label_data = numpy.expand_dims(label_data, axis=1)
feed_data(images, image_data)
feed_data(labels, label_data)
......@@ -235,6 +236,7 @@ for pass_id in range(PASS_NUM):
for data in train_reader():
image_data = numpy.array(map(lambda x: x[0], data)).astype("float32")
label_data = numpy.array(map(lambda x: x[1], data)).astype("int32")
label_data = numpy.expand_dims(label_data, axis=1)
feed_data(images, image_data)
feed_data(labels, label_data)
......
import unittest
import numpy as np
from op_test import OpTest
class PReluTest(OpTest):
def setUp(self):
self.op_type = "prelu"
x_np = np.random.normal(size=(10, 10)).astype("float32")
x_np_sign = np.sign(x_np)
x_np = x_np_sign * np.maximum(x_np, .005)
alpha_np = np.array([.1])
self.inputs = {'X': x_np, 'Alpha': alpha_np}
out_np = np.maximum(self.inputs['X'], 0.)
out_np = out_np + np.minimum(self.inputs['X'],
0.) * self.inputs['Alpha']
assert out_np is not self.inputs['X']
self.outputs = {'Out': out_np}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == "__main__":
unittest.main()
......@@ -3,20 +3,7 @@ import numpy as np
from op_test import OpTest
class IdentityTest(OpTest):
def setUp(self):
self.op_type = "identity"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
self.outputs = {'Out': self.inputs['X']}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class ScaleTest(OpTest):
class TestScaleOp(OpTest):
def setUp(self):
self.op_type = "scale"
self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
......
import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
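# LoD boundaries: 4 sequences covering rows [0, 4), [4, 5), [5, 8), [8, 11)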
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
......@@ -3,7 +3,7 @@ import numpy as np
from op_test import OpTest
class TestSGD(OpTest):
class TestSGDOp(OpTest):
def setUp(self):
self.op_type = "sgd"
w = np.random.random((102, 105)).astype("float32")
......
......@@ -3,7 +3,7 @@ import numpy as np
from op_test import OpTest
class TestSigmoid(OpTest):
class TestSigmoidOp(OpTest):
def setUp(self):
self.op_type = "sigmoid"
self.inputs = {
......
import unittest
import numpy as np
from op_test import OpTest
class TestSplitOp(OpTest):
def setUp(self):
self.op_type = "split"
axis = 0
num = 2
x = np.random.random((4, 2)).astype('float32')
out = np.split(x, num, axis)
self.inputs = {'X': x}
self.attrs = {'axis': axis, 'num': num}
self.outputs = {'Out': [('out%d' % i, out[i]) \
for i in xrange(len(out))]}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['out0', 'out1'])
if __name__ == '__main__':
unittest.main()
......@@ -21,6 +21,9 @@ class TestTopkOp(OpTest):
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
self.check_output()
class TestTopkOp3d(OpTest):
def setUp(self):
......@@ -42,6 +45,9 @@ class TestTopkOp3d(OpTest):
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()
......@@ -4,7 +4,7 @@ import paddle.v2.framework.core as core
import numpy
class UniformRandomTest(unittest.TestCase):
class TestUniformRandomOp(unittest.TestCase):
def test_uniform_random_cpu(self):
self.uniform_random_test(place=core.CPUPlace())
......