diff --git a/.gitignore b/.gitignore index 1c9730a5ad57cd70613c0692529bcb1ccf056d59..6aae076a49012b032b8fc0f1dc02c2714fb7b4a3 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ build/ .pydevproject Makefile .test_env/ +third_party/ *~ bazel-* diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index f635e65784af47a21df80cc92073ef14eba9a731..0000000000000000000000000000000000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "warp-ctc"] - path = warp-ctc - url = https://github.com/baidu-research/warp-ctc.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b9902a863d864b28f0fad0fefe64248e356010e4..a6e45028ebc3f53ea20806f0dd2a7acc820607fe 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ sha: c25201a00e6b0514370501050cf2a8538ac12270 hooks: - id: remove-crlf - files: (?!.*warp-ctc)^.*$ + files: (?!.*third_party)^.*$ - repo: https://github.com/reyoung/mirrors-yapf.git sha: v0.13.2 hooks: @@ -15,7 +15,7 @@ - id: check-merge-conflict - id: check-symlinks - id: detect-private-key - files: (?!.*warp-ctc)^.*$ + files: (?!.*third_party)^.*$ - id: end-of-file-fixer - repo: https://github.com/PaddlePaddle/clang-format-pre-commit-hook.git sha: 28c0ea8a67a3e2dbbf4822ef44e85b63a0080a29 diff --git a/.travis.yml b/.travis.yml index 047ca6ffe79bdaf013f6ef6dbf1a82bdb2f1f2b3..0705baa1aca8b480b2a774076bd91fb9df401a53 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,9 @@ language: cpp -cache: ccache +cache: + directories: + - $HOME/third_party + - $HOME/.ccache + - $HOME/.cache/pip sudo: required dist: trusty os: @@ -21,26 +25,21 @@ addons: packages: - gcc-4.8 - g++-4.8 - - wget - git - build-essential - libatlas-base-dev - python - python-pip - python2.7-dev - - m4 - python-numpy - python-wheel - - libgoogle-glog-dev - - libgflags-dev - - libgtest-dev - curl - - lcov - - graphviz - swig + - graphviz - clang-format-3.8 - automake - libtool + - ccache before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then @@ -53,10 +52,9 @@ before_install: fi fi fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit requests==2.9.2 LinkChecker + - pip install numpy wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh notifications: diff --git a/CMakeLists.txt b/CMakeLists.txt index 3fd5c7187153675d820cd02f1fdddd47e8b31166..8f53abacb4052febc23c6ac76f2481776730056e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,166 +1,99 @@ -cmake_minimum_required(VERSION 2.8) +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License + +cmake_minimum_required(VERSION 3.0) project(paddle CXX C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") set(PROJ_ROOT ${CMAKE_SOURCE_DIR}) -include(package) -find_package(SWIG 2.0) -find_package(CUDA QUIET) -find_package(Protobuf REQUIRED) - -# Check protobuf library version. -execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version - OUTPUT_VARIABLE PROTOBUF_VERSION) -string(REPLACE "libprotoc " "" PROTOBUF_VERSION ${PROTOBUF_VERSION}) - -set(PROTOBUF_3 OFF) -if (${PROTOBUF_VERSION} VERSION_GREATER "3.0.0" OR ${PROTOBUF_VERSION} VERSION_EQUAL "3.0.0") - set(PROTOBUF_3 ON) -endif() -find_package(PythonLibs 2.7 REQUIRED) -find_package(PythonInterp 2.7 REQUIRED) -find_package(ZLIB REQUIRED) -find_package(NumPy REQUIRED) -find_package(Threads REQUIRED) -find_package(AVX QUIET) -find_package(Glog REQUIRED) -find_package(gflags COMPONENTS nothreads_static REQUIRED) -find_package(GTest) find_package(Sphinx) -find_package(Doxygen) -include(cblas) -find_program(M4_EXECUTABLE m4) -###################### Configurations ########################### -option(WITH_DSO "Compile PaddlePaddle with dynamic linked libraries" ON) -option(WITH_GPU "Compile PaddlePaddle with gpu" ${CUDA_FOUND}) -option(WITH_DOUBLE "Compile PaddlePaddle with double precision, otherwise use single precision" OFF) -option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND}) -option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) -option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND}) -option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF) -option(WITH_TIMER "Compile PaddlePaddle use timer" OFF) -option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF) -option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND}) -option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) -option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ${SWIG_FOUND}) -option(ON_TRAVIS "Running test on travis-ci or not." OFF) -option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF) -option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON) - - -include(cpplint) -include(ccache) -if(WITH_RDMA) - include(rdma) -endif() -include(util) -include(flags) -include(cudnn) -include(FindPythonModule) -include(check_packages) -include(swig) -include(coveralls) - -# Set PaddlePaddle version to Git tag name or Git commit ID. 
+find_package(CUDA QUIET) find_package(Git REQUIRED) -# version.cmake will get the current PADDLE_VERSION -include(version) -add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION}) - -if(NOT WITH_GPU) - add_definitions(-DPADDLE_ONLY_CPU) - add_definitions(-DHPPL_STUB_FUNC) - - list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) -else() - if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) - message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile") - endif() - - if(NOT CUDNN_FOUND) - message(FATAL_ERROR "Paddle need cudnn to compile") - endif() - - if(WITH_AVX) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") - else(WITH_AVX) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") - endif(WITH_AVX) - - # Include cuda and cudnn - include_directories(${CUDNN_INCLUDE_DIR}) - include_directories(${CUDA_TOOLKIT_INCLUDE}) -endif(NOT WITH_GPU) - -if(WITH_DSO) - add_definitions(-DPADDLE_USE_DSO) -endif(WITH_DSO) - -if(WITH_DOUBLE) - add_definitions(-DPADDLE_TYPE_DOUBLE) - set(ACCURACY double) -else(WITH_DOUBLE) - set(ACCURACY float) -endif(WITH_DOUBLE) - -if(NOT WITH_TIMER) - add_definitions(-DPADDLE_DISABLE_TIMER) -endif(NOT WITH_TIMER) - -if(NOT WITH_PROFILER) - add_definitions(-DPADDLE_DISABLE_PROFILER) -endif(NOT WITH_PROFILER) - -if(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") -else(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") -endif(WITH_AVX) - -if(WITH_PYTHON) - include_directories(${PYTHON_INCLUDE_DIR}) - include_directories(${PYTHON_NUMPY_INCLUDE_DIR}) -else(WITH_PYTHON) - add_definitions(-DPADDLE_NO_PYTHON) -endif(WITH_PYTHON) - -if(WITH_RDMA) - include_directories("${RDMA_INC_DIR}") -else(WITH_RDMA) - add_definitions(-DPADDLE_DISABLE_RDMA) -endif(WITH_RDMA) - -# glog -include_directories(${LIBGLOG_INCLUDE_DIR}) - -#gflags -add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE}) -include_directories(${GFLAGS_INCLUDE_DIRS}) +find_package(Threads REQUIRED) -if(WITH_TESTING) - enable_testing() - include_directories(${GTEST_INCLUDE_DIRS}) +include(system) +include(simd) + +################################ Configurations ####################################### +option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND}) +option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) +option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" OFF) +option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) +option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) +option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON) +option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) +option(WITH_DOUBLE "Compile PaddlePaddle with double precision" OFF) +option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF) +option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF) +option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler" OFF) +option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) +option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF) +option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) +option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) + +# CMAKE_BUILD_TYPE +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING + "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel" + FORCE) endif() -include_directories("${CBLAS_INC_DIR}") +set(THIRD_PARTY_PATH 
"${PROJ_ROOT}/third_party" CACHE STRING + "A path setting third party libraries download & build directories.") +######################################################################################## + +include(external/zlib) # download, build, install zlib +include(external/gflags) # download, build, install gflags +include(external/glog) # download, build, install glog +include(external/gtest) # download, build, install gtest +include(external/protobuf) # download, build, install protobuf +include(external/python) # download, build, install python +include(external/openblas) # download, build, install openblas +include(external/swig) # download, build, install swig +include(external/warpctc) # download, build, install warpctc + +include(package) # set paddle packages +include(cpplint) # set paddle c++ style +include(ccache) # set ccache for compilation +include(util) # set unittest and link libs +include(rdma) # set rdma libraries +include(flags) # set paddle compile flags +include(cudnn) # set cudnn libraries +include(version) # set PADDLE_VERSION +include(coveralls) # set code coverage + +include(configure) # add paddle env configuration + include_directories("${PROJ_ROOT}") include_directories("${PROJ_ROOT}/paddle/cuda/include") -include_directories(${PROTOBUF_INCLUDE_DIRS}) include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto") -if(EXISTS "${PROJ_ROOT}/paddle/internals/CMakeLists.txt") - set(PADDLE_WITH_INTERNAL ON) - include(paddle/internals/CMakeLists.txt) -else() - set(PADDLE_WITH_INTERNAL OFF) - set(INTERNAL_PROTO_PATH "") -endif() + +set(EXTERNAL_LIBS + # have not include gtest here. + ${GFLAGS_LIBRARIES} + ${GLOG_LIBRARIES} + ${CBLAS_LIBRARIES} + ${PROTOBUF_LIBRARY} + ${ZLIB_LIBRARIES} +) + add_subdirectory(proto) add_subdirectory(paddle) add_subdirectory(python) + if(WITH_DOC) add_subdirectory(doc) endif() diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 685334c6585060c0344e552c6f3fda2c7324de03..4e1ae7dc81231943c4bf3db4d4ac6f073f4fd1c4 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -13,6 +13,7 @@ # system paths. # +set(CBLAS_FOUND OFF) ## Find MKL First. set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains MKL") @@ -35,11 +36,12 @@ find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64) set(CBLAS_PROVIDER MKL) set(CBLAS_INC_DIR ${MKL_INCLUDE_DIR}) - set(CBLAS_LIBS ${MKL_INTEL_LP64} + set(CBLAS_LIBRARIES ${MKL_INTEL_LP64} ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB}) add_definitions(-DPADDLE_USE_MKL) - message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") + message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + set(CBLAS_FOUND ON) return() # return file. 
endif() @@ -68,9 +70,10 @@ find_library(ATLAS_LIB NAMES lapack_atlas liblapack_atlas.so.3 if(ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_LIB) set(CBLAS_PROVIDER ATLAS) set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR}) - set(CBLAS_LIBS ${ATLAS_LIB} ${ATLAS_CBLAS_LIB}) + set(CBLAS_LIBRARIES ${ATLAS_LIB} ${ATLAS_CBLAS_LIB}) add_definitions(-DPADDLE_USE_ATLAS) - message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") + message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + set(CBLAS_FOUND ON) return() endif() @@ -98,8 +101,9 @@ find_library(OPENBLAS_LIB NAMES openblas if(OPENBLAS_INC_DIR AND OPENBLAS_LIB) set(CBLAS_PROVIDER OPENBLAS) set(CBLAS_INC_DIR ${OPENBLAS_INC_DIR}) - set(CBLAS_LIBS ${OPENBLAS_LIB}) - message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") + set(CBLAS_LIBRARIES ${OPENBLAS_LIB}) + message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + set(CBLAS_FOUND ON) return() endif() @@ -130,9 +134,7 @@ find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) set(CBLAS_PROVIDER REFERENCE) set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) - set(CBLAS_LIBS ${REFERENCE_CBLAS_LIBRARY}) - return() + set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY}) + message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + set(CBLAS_FOUND ON) endif() - -message(FATAL_ERROR "CBlas must be set. Paddle support MKL, ATLAS, OpenBlas, reference-cblas." " Try set MKL_ROOT, ATLAS_ROOT, OPENBLAS_ROOT or REFERENCE_CBLAS_ROOT.") diff --git a/cmake/check_packages.cmake b/cmake/check_packages.cmake deleted file mode 100644 index afb84c6ff52af05769a99246d2e93380832c04e0..0000000000000000000000000000000000000000 --- a/cmake/check_packages.cmake +++ /dev/null @@ -1,39 +0,0 @@ -# Check package for each cmake option - -if(WITH_GPU) - find_package(CUDA REQUIRED) # CUDA is required when use gpu -endif() - -if(WITH_PYTHON) - find_package(PythonLibs 2.6 REQUIRED) - find_package(PythonInterp REQUIRED) - find_package(NumPy REQUIRED) -endif() - -if(WITH_STYLE_CHECK) - find_package(PythonInterp REQUIRED) -endif() - -find_package(Glog REQUIRED) - -find_package(Gflags REQUIRED) - -if(WITH_TESTING) - find_package(GTest REQUIRED) -endif() - -if(WITH_DOC) - find_package(Sphinx REQUIRED) - find_python_module(recommonmark REQUIRED) -endif() - -if(WITH_SWIG_PY) - if(NOT SWIG_FOUND) - message(FATAL_ERROR "SWIG is not found. Please install swig or disable WITH_SWIG_PY") - endif() - find_python_module(wheel REQUIRED) # package wheel -endif() - -if(NOT M4_EXECUTABLE) - message(FATAL_ERROR "Paddle need m4 to generate proto file.") -endif() diff --git a/cmake/configure.cmake b/cmake/configure.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0bb016201dd8ae912ac8ec9f925bc5277fad7aed --- /dev/null +++ b/cmake/configure.cmake @@ -0,0 +1,68 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +if(NOT WITH_PYTHON) + add_definitions(-DPADDLE_NO_PYTHON) +endif(NOT WITH_PYTHON) + +if(WITH_DSO) + add_definitions(-DPADDLE_USE_DSO) +endif(WITH_DSO) + +if(WITH_DOUBLE) + add_definitions(-DPADDLE_TYPE_DOUBLE) +endif(WITH_DOUBLE) + +if(NOT WITH_TIMER) + add_definitions(-DPADDLE_DISABLE_TIMER) +endif(NOT WITH_TIMER) + +if(NOT WITH_PROFILER) + add_definitions(-DPADDLE_DISABLE_PROFILER) +endif(NOT WITH_PROFILER) + +if(NOT WITH_GPU) + add_definitions(-DPADDLE_ONLY_CPU) + add_definitions(-DHPPL_STUB_FUNC) + + list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) +else() + FIND_PACKAGE(CUDA REQUIRED) + + if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) + message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile") + endif() + + if(NOT CUDNN_FOUND) + message(FATAL_ERROR "Paddle need cudnn to compile") + endif() + + if(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") + else(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") + endif(WITH_AVX) + + # Include cuda and cudnn + include_directories(${CUDNN_INCLUDE_DIR}) + include_directories(${CUDA_TOOLKIT_INCLUDE}) +endif(NOT WITH_GPU) + +if(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") +else(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") +endif(WITH_AVX) diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake index 241af9a0835b2f100c8fb8b246426e631e42aef3..38c636b30edc0af1c07255814e8bc2b1ad9514da 100644 --- a/cmake/cpplint.cmake +++ b/cmake/cpplint.cmake @@ -53,7 +53,7 @@ macro(add_style_check_target TARGET_NAME) if(LINT MATCHES ON) add_custom_command(TARGET ${TARGET_NAME} PRE_BUILD - COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py" + COMMAND env ${py_env} "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py" "--filter=${STYLE_FILTER}" ${filename} WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}) endif() diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d38b7d1ba2a74d5bb46d0c07e3abe6832d4c8af3 --- /dev/null +++ b/cmake/external/gflags.cmake @@ -0,0 +1,39 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(GFLAGS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags) +SET(GFLAGS_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/gflags) +SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." 
FORCE) +IF(WIN32) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) +ELSE(WIN32) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR}) + +ExternalProject_Add( + gflags + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/gflags/gflags.git" + PREFIX ${GFLAGS_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON + CMAKE_ARGS -DBUILD_TESTING=OFF +) + +LIST(APPEND external_project_dependencies gflags) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake new file mode 100644 index 0000000000000000000000000000000000000000..71e20c85276b014c2e33735c3199c3772526c6c7 --- /dev/null +++ b/cmake/external/glog.cmake @@ -0,0 +1,41 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(GLOG_SOURCES_DIR ${THIRD_PARTY_PATH}/glog) +SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog) +SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE) + +IF(WIN32) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) +ELSE(WIN32) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) + +ExternalProject_Add( + glog + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/google/glog.git" + PREFIX ${GLOG_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON + CMAKE_ARGS -DWITH_GFLAGS=OFF + CMAKE_ARGS -DBUILD_TESTING=OFF +) + +LIST(APPEND external_project_dependencies glog) diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake new file mode 100644 index 0000000000000000000000000000000000000000..11d829a9e2f239848803130505c9862695b25029 --- /dev/null +++ b/cmake/external/gtest.cmake @@ -0,0 +1,51 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +IF(WITH_TESTING) + ENABLE_TESTING() + INCLUDE(ExternalProject) + + SET(GTEST_SOURCES_DIR ${THIRD_PARTY_PATH}/gtest) + SET(GTEST_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gtest) + SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." 
FORCE) + + INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR}) + + IF(WIN32) + set(GTEST_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE) + set(GTEST_MAIN_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE) + ELSE(WIN32) + set(GTEST_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE) + set(GTEST_MAIN_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest main libraries." FORCE) + ENDIF(WIN32) + + ExternalProject_Add( + gtest + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/google/googletest.git" + GIT_TAG "release-1.8.0" + PREFIX ${GTEST_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON + CMAKE_ARGS -DBUILD_GMOCK=ON + CMAKE_ARGS -Dgtest_disable_pthreads=ON + CMAKE_ARGS -Dgtest_force_shared_crt=ON + ) + LIST(APPEND external_project_dependencies gtest) +ENDIF(WITH_TESTING) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0e8c29c831c823f701d8eecd954d3b120085e495 --- /dev/null +++ b/cmake/external/openblas.cmake @@ -0,0 +1,47 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(cblas) + +IF(NOT ${CBLAS_FOUND}) + MESSAGE(STATUS "Cannot find a system CBLAS; OpenBLAS will be downloaded and built instead.") + INCLUDE(ExternalProject) + + SET(CBLAS_SOURCES_DIR ${THIRD_PARTY_PATH}/openblas) + SET(CBLAS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/openblas) + SET(CBLAS_INC_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE) + + IF(WIN32) + SET(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/openblas.lib" CACHE FILEPATH "openblas library." FORCE) + ELSE(WIN32) + SET(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/libopenblas.a" CACHE FILEPATH "openblas library" FORCE) + ENDIF(WIN32) + + ExternalProject_Add( + openblas + ${EXTERNAL_PROJECT_LOG_ARGS} + URL "https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz" + PREFIX ${CBLAS_SOURCES_DIR} + INSTALL_DIR ${CBLAS_INSTALL_DIR} + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND "" + BUILD_COMMAND make CC=${CMAKE_C_COMPILER} FC=${CMAKE_Fortran_COMPILER} + INSTALL_COMMAND make install PREFIX=<INSTALL_DIR> + UPDATE_COMMAND "" + ) + + LIST(APPEND external_project_dependencies openblas) +ENDIF() + +INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake new file mode 100644 index 0000000000000000000000000000000000000000..c0cf2719f9a7b3ae6be5cefffa3dbd2c3f712e82 --- /dev/null +++ b/cmake/external/protobuf.cmake @@ -0,0 +1,62 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf) +SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf) +SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE) + +INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) + +IF(WIN32) + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE) + SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) +ELSE(WIN32) + IF(${HOST_SYSTEM} STREQUAL "centos") + SET(LIB "lib64") + ELSE() + SET(LIB "lib") + ENDIF() + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) + SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE) +ENDIF(WIN32) + +ExternalProject_Add( + protobuf + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${PROTOBUF_SOURCES_DIR} + UPDATE_COMMAND "" + DEPENDS zlib + GIT_REPOSITORY "https://github.com/google/protobuf.git" + GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546" + CONFIGURE_COMMAND + ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake + -Dprotobuf_BUILD_TESTS=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} +) + +LIST(APPEND external_project_dependencies protobuf) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake new file mode 100644 index 0000000000000000000000000000000000000000..55787f75f8b55e2c68e28717b513c492635490ee --- /dev/null +++ b/cmake/external/python.cmake @@ -0,0 +1,222 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
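Note on cmake/external/protobuf.cmake above: since protoc is now pinned and built by the ExternalProject rather than taken from the system, any rule that generates sources has to invoke ${PROTOBUF_PROTOC_EXECUTABLE} and depend on the protobuf target. A sketch under those assumptions (example.proto is a placeholder, not a file in this patch):

    # Generate C++ sources with the just-built protoc; the DEPENDS on the
    # 'protobuf' target guarantees the ExternalProject has run first.
    add_custom_command(
      OUTPUT  ${CMAKE_CURRENT_BINARY_DIR}/example.pb.cc
              ${CMAKE_CURRENT_BINARY_DIR}/example.pb.h
      COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
              --cpp_out=${CMAKE_CURRENT_BINARY_DIR}
              -I ${CMAKE_CURRENT_SOURCE_DIR}
              ${CMAKE_CURRENT_SOURCE_DIR}/example.proto
      DEPENDS protobuf ${CMAKE_CURRENT_SOURCE_DIR}/example.proto)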
+ +INCLUDE(ExternalProject) +INCLUDE(python_module) + +FIND_PACKAGE(PythonInterp 2.7) +FIND_PACKAGE(PythonLibs 2.7) + +SET(py_env "") + +IF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) + find_python_module(pip REQUIRED) + find_python_module(numpy REQUIRED) + find_python_module(wheel REQUIRED) + find_python_module(google.protobuf REQUIRED) + FIND_PACKAGE(NumPy REQUIRED) +ELSE(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) + ##################################### PYTHON ######################################## + SET(PYTHON_SOURCES_DIR ${THIRD_PARTY_PATH}/python) + SET(PYTHON_INSTALL_DIR ${THIRD_PARTY_PATH}/install/python) + SET(_python_DIR ${PYTHON_INSTALL_DIR}) + + IF(UNIX) + SET(PYTHON_FOUND ON) + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.a" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) + ELSEIF(WIN32) + SET(PYTHON_FOUND ON) + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) + ELSE() + MESSAGE(FATAL_ERROR "Unknown system !") + ENDIF() + + IF(APPLE) + LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS + -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON + ) + ENDIF() + + SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) + + # Force Python build to "Release". 
+ IF(CMAKE_CONFIGURATION_TYPES) + SET(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR}) + SET(CMAKE_CFG_INTDIR "Release") + ELSE() + LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS + -DCMAKE_BUILD_TYPE:STRING=Release + ) + ENDIF() + + ExternalProject_Add(python + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git" + PREFIX ${PYTHON_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DPYTHON_VERSION=2.7.12 + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + CMAKE_CACHE_ARGS + -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} + -DBUILD_LIBPYTHON_SHARED:BOOL=OFF + -DUSE_SYSTEM_LIBRARIES:BOOL=OFF + -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} + -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} + -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} + -DDOWNLOAD_SOURCES:BOOL=ON + -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} + DEPENDS zlib + ) + + SET(py_env + PATH=${PYTHON_INSTALL_DIR}/bin + PYTHONHOME=${PYTHON_INSTALL_DIR} + PYTHONPATH=${PYTHON_INSTALL_DIR}/lib:${PYTHON_INSTALL_DIR}/lib/python2.7:${PY_SITE_PACKAGES_PATH}) + #################################################################################### + + ##################################### SETUPTOOLS ################################### + SET(SETUPTOOLS_SOURCES_DIR ${PYTHON_SOURCES_DIR}/setuptools) + ExternalProject_Add(setuptools + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${SETUPTOOLS_SOURCES_DIR} + URL "https://pypi.python.org/packages/source/s/setuptools/setuptools-18.3.2.tar.gz" + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python zlib + ) + ##################################################################################### + + ##################################### SIX ########################################### + SET(SIX_SOURCES_DIR ${PYTHON_SOURCES_DIR}/six) + ExternalProject_Add(six + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${SIX_SOURCES_DIR} + URL https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python setuptools + ) + ##################################################################################### + + ##################################### CYTHON ######################################## + SET(CYTHON_SOURCES_DIR ${PYTHON_SOURCES_DIR}/cython) + ExternalProject_Add(cython + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${CYTHON_SOURCES_DIR} + URL https://github.com/cython/cython/archive/0.25.2.tar.gz + GIT_TAG 0.25.2 + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND "" + PATCH_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python + ) + #################################################################################### + + ##################################### NUMPY ######################################## + SET(NUMPY_SOURCES_DIR ${PYTHON_SOURCES_DIR}/numpy) + SET(NUMPY_TAG_VERSION "v1.11.3") + SET(NUMPY_VERSION "1.11.3") + + SET(EGG_NAME "") + SET(PYTHON_NUMPY_INCLUDE_DIR "") + IF(WIN32) + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}.egg") + ELSE(WIN32) + IF(APPLE) + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}") + 
ELSE(APPLE) + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux") + ENDIF(APPLE) + + FOREACH(suffix x86_64 intel fat64 fat32 universal) + LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR ${PY_SITE_PACKAGES_PATH}/${EGG_NAME}-${suffix}.egg/numpy/core/include) + ENDFOREACH() + ENDIF(WIN32) + + ExternalProject_Add(numpy + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/numpy/numpy.git + GIT_TAG ${NUMPY_TAG_VERSION} + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + PREFIX ${NUMPY_SOURCES_DIR} + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools cython + ) + #################################################################################### + + ##################################### WHEEL ######################################## + SET(WHEEL_SOURCES_DIR ${PYTHON_SOURCES_DIR}/wheel) + ExternalProject_Add(wheel + ${EXTERNAL_PROJECT_LOG_ARGS} + URL https://pypi.python.org/packages/source/w/wheel/wheel-0.29.0.tar.gz + PREFIX ${WHEEL_SOURCES_DIR} + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools + ) + #################################################################################### + + ################################### PROTOBUF ####################################### + SET(PY_PROTOBUF_SOURCES_DIR ${PYTHON_SOURCES_DIR}/protobuf) + ExternalProject_Add(python-protobuf + ${EXTERNAL_PROJECT_LOG_ARGS} + URL https://pypi.python.org/packages/e0/b0/0a1b364fe8a7d177b4b7d4dca5b798500dc57a7273b93cca73931b305a6a/protobuf-3.1.0.post1.tar.gz + URL_MD5 38b5fb160c768d2f8444d0c6d637ff91 + PREFIX ${PY_PROTOBUF_SOURCES_DIR} + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python setuptools six + ) + #################################################################################### + + LIST(APPEND external_project_dependencies python setuptools six cython wheel python-protobuf numpy) + +ENDIF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) + +INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) + +MESSAGE("[Paddle] Python Executable: ${PYTHON_EXECUTABLE}") +MESSAGE("[Paddle] Python Include: ${PYTHON_INCLUDE_DIR}") +MESSAGE("[Paddle] Python Libraries: ${PYTHON_LIBRARIES}") diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake new file mode 100644 index 0000000000000000000000000000000000000000..63e8bd25462e50e2f78908899938468c989b3ac3 --- /dev/null +++ b/cmake/external/swig.cmake @@ -0,0 +1,74 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
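Note on cmake/external/python.cmake above: the py_env list is what keeps the optional hermetic interpreter self-consistent. Every shell-out to Python in this patch is prefixed with env ${py_env}, and when the system interpreter is used py_env is empty, so the prefix collapses to a bare env. A sketch of the same pattern for a hypothetical sanity-check target:

    # Run the selected interpreter with its own PYTHONHOME/PYTHONPATH so
    # system site-packages cannot leak into the build.
    add_custom_target(python_env_check
      COMMAND env ${py_env} ${PYTHON_EXECUTABLE} -c "import numpy, google.protobuf"
      COMMENT "Checking the Python environment used by the build")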
+ +FIND_PACKAGE(SWIG) + +IF(NOT SWIG_FOUND) + # build swig as an external project + INCLUDE(ExternalProject) + + SET(SWIG_SOURCES_DIR ${THIRD_PARTY_PATH}/swig) + SET(SWIG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/swig) + SET(SWIG_TARGET_VERSION "3.0.2") + SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") + SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") + + IF(WIN32) + # swig.exe available as pre-built binary on Windows: + ExternalProject_Add(swig + URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip + URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} + SOURCE_DIR ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + UPDATE_COMMAND "" + ) + SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) + SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) + ELSE(WIN32) + # From PCRE configure + ExternalProject_Add(pcre + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/svn2github/pcre.git + PREFIX ${SWIG_SOURCES_DIR}/pcre + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre + ) + + # swig uses bison find it by cmake and pass it down + FIND_PACKAGE(BISON) + + # From SWIG configure + ExternalProject_Add(swig + GIT_REPOSITORY https://github.com/swig/swig.git + GIT_TAG rel-3.0.10 + PREFIX ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && + env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" + ./configure + --prefix=${SWIG_INSTALL_DIR} + --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre + BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make + INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install + UPDATE_COMMAND "" + DEPENDS pcre + ) + + SET(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION}) + SET(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig) + ENDIF(WIN32) + + LIST(APPEND external_project_dependencies swig) +ENDIF(NOT SWIG_FOUND) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake new file mode 100644 index 0000000000000000000000000000000000000000..f5e4b3e1eb39acbe8dbcd0023956ca7e52c1ecd8 --- /dev/null +++ b/cmake/external/warpctc.cmake @@ -0,0 +1,60 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
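Note on cmake/external/swig.cmake above: whichever branch runs, downstream code only sees SWIG_DIR and SWIG_EXECUTABLE. A sketch of driving it directly, loosely modeled on the deleted cmake/swig.cmake later in this patch; the paths are illustrative, and a dependency on the swig ExternalProject target would be added when SWIG is built from source:

    # Generate the Python wrapper with whichever SWIG was selected.
    add_custom_command(
      OUTPUT  ${CMAKE_CURRENT_BINARY_DIR}/Paddle_wrap.cxx
      COMMAND ${SWIG_EXECUTABLE} -python -c++
              -o ${CMAKE_CURRENT_BINARY_DIR}/Paddle_wrap.cxx
              ${CMAKE_CURRENT_SOURCE_DIR}/api/Paddle.swig
      DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/api/Paddle.swig)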
+ +INCLUDE(ExternalProject) + +SET(WARPCTC_SOURCES_DIR ${THIRD_PARTY_PATH}/warpctc) +SET(WARPCTC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/warpctc) +SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) + +INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) + +SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Directory" FORCE) + +IF(WIN32) + SET(WARPCTC_LIBRARIES + "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) +ELSE(WIN32) + IF(APPLE) + SET(_warpctc_SHARED_SUFFIX dylib) + ELSE(APPLE) + SET(_warpctc_SHARED_SUFFIX so) + ENDIF(APPLE) + + SET(WARPCTC_LIBRARIES + "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE) +ENDIF(WIN32) + +IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" ) + SET(USE_OMP OFF) +ELSE() + SET(USE_OMP ON) +ENDIF() + +ExternalProject_Add( + warpctc + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" + PREFIX ${WARPCTC_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR} + CMAKE_ARGS -DWITH_GPU=${WITH_GPU} + CMAKE_ARGS -DWITH_OMP=${USE_OMP} + CMAKE_ARGS -DWITH_TORCH=OFF + CMAKE_ARGS -DBUILD_SHARED=ON +) + +LIST(APPEND external_project_dependencies warpctc) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake new file mode 100644 index 0000000000000000000000000000000000000000..47fa8817fb64fb8fd718e2892ad5bae7bbe956eb --- /dev/null +++ b/cmake/external/zlib.cmake @@ -0,0 +1,43 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib) +SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib) +SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) +SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) + +IF(WIN32) + SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE) +ELSE(WIN32) + set(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." 
FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) + +ExternalProject_Add( + zlib + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/madler/zlib.git" + GIT_TAG "v1.2.8" + PREFIX ${ZLIB_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR} + CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON + CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON +) + +LIST(APPEND external_project_dependencies zlib) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 0983d83b73a32d0615170155759d45001cc6ff54..0d1ef5cd8449bd31b4cfa4619f27bce7c1f55ebb 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -3,12 +3,6 @@ include(CheckCXXCompilerFlag) include(CheckCCompilerFlag) include(CheckCXXSymbolExists) -if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING - "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel" - FORCE) -endif() - function(CheckCompilerCXX11Flag) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8) diff --git a/cmake/FindPythonModule.cmake b/cmake/python_module.cmake similarity index 100% rename from cmake/FindPythonModule.cmake rename to cmake/python_module.cmake diff --git a/cmake/rdma.cmake b/cmake/rdma.cmake index e9a4da79aa92a92aa7e5d21bb795ab9aaf60ab8b..9ff1a77cac74fb1bdfe470a78d225ed1767bb1b5 100644 --- a/cmake/rdma.cmake +++ b/cmake/rdma.cmake @@ -5,72 +5,76 @@ # svn co https://svn.baidu.com/sys/ip/trunk/rdma/thirdparty rdma/ # we use static output in svn repositories to avoid implict bugs from not standard runtime env. -set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library") +if(WITH_RDMA) + set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library") -function(generate_rdma_links) - #redirect to current DIR to isolate the pollution from system runtime environment - #it can benifits unified control for different gcc environment. - #e.g, by default gcc48 did not refer /usr/lib64 which could contain low version - #runtime libraries that will crash process while loading it. That redirect trick - #can fix it. 
- execute_process( - COMMAND mkdir -p librdma - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - ) -endfunction(generate_rdma_links) - - -#check and set headers -find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) -find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) -find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - -#check and set libs -find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) -find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) -find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - -if( - RDMA_INC_SXISOCK AND - RDMA_INC_XIO AND - RDMA_INC_EVENT AND - RDMA_INC_NUMA AND - RDMA_LIB_SXISOCK AND - RDMA_LIB_XIO AND - RDMA_LIB_EVENT AND - RDMA_LIB_EVENT_CORE AND - RDMA_LIB_EVENT_EXTRA AND - RDMA_LIB_EVENT_PTHREADS AND - RDMA_LIB_NUMA + function(generate_rdma_links) + #redirect to current DIR to isolate the pollution from system runtime environment + #it can benifits unified control for different gcc environment. + #e.g, by default gcc48 did not refer /usr/lib64 which could contain low version + #runtime libraries that will crash process while loading it. That redirect trick + #can fix it. 
+ execute_process( + COMMAND mkdir -p librdma + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) + endfunction(generate_rdma_links) - set(RDMA_INC_DIR - ${RDMA_INC_SXISOCK} - ${RDMA_INC_XIO} - ${RDMA_INC_EVENT} - ${RDMA_INC_NUMA}) - set(RDMA_LIBS - ${RDMA_LIB_SXISOCK} - ${RDMA_LIB_XIO} - ${RDMA_LIB_EVENT} - ${RDMA_LIB_EVENT_CORE} - ${RDMA_LIB_EVENT_EXTRA} - ${RDMA_LIB_EVENT_PTHREADS} - ${RDMA_LIB_NUMA} - ) - set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") - return() -endif() + #check and set headers + find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) + find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) + find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) + + #check and set libs + find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) + find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) + find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) -#if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable + if( + RDMA_INC_SXISOCK AND + RDMA_INC_XIO AND + RDMA_INC_EVENT AND + RDMA_INC_NUMA AND + RDMA_LIB_SXISOCK AND + RDMA_LIB_XIO AND + RDMA_LIB_EVENT AND + RDMA_LIB_EVENT_CORE AND + RDMA_LIB_EVENT_EXTRA AND + RDMA_LIB_EVENT_PTHREADS AND + RDMA_LIB_NUMA + ) -message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") + set(RDMA_INC_DIR + ${RDMA_INC_SXISOCK} + ${RDMA_INC_XIO} + ${RDMA_INC_EVENT} + ${RDMA_INC_NUMA}) + set(RDMA_LIBS + ${RDMA_LIB_SXISOCK} + ${RDMA_LIB_XIO} + ${RDMA_LIB_EVENT} + ${RDMA_LIB_EVENT_CORE} + ${RDMA_LIB_EVENT_EXTRA} + ${RDMA_LIB_EVENT_PTHREADS} + ${RDMA_LIB_NUMA} + ) + set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") + include_directories("${RDMA_INC_DIR}") + else() + #if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable + message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") + endif() +else(WITH_RDMA) + set(RDMA_LIBS "") + set(RDMA_LD_FLAGS "") + add_definitions(-DPADDLE_DISABLE_RDMA) +endif(WITH_RDMA) diff --git a/cmake/FindAVX.cmake b/cmake/simd.cmake similarity index 100% rename from cmake/FindAVX.cmake rename to cmake/simd.cmake diff --git a/cmake/swig.cmake b/cmake/swig.cmake deleted file mode 100644 index 97e87aa947791e2c5a88e7e554dec43bcd661664..0000000000000000000000000000000000000000 --- a/cmake/swig.cmake +++ /dev/null @@ -1,15 +0,0 @@ -function(generate_python_api target_name) - add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - 
${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - COMMAND swig -python -c++ -outcurrentdir -I../ api/Paddle.swig - && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig - ${PROJ_ROOT}/paddle/api/PaddleAPI.h - WORKING_DIRECTORY ${PROJ_ROOT}/paddle - COMMENT "Generate Python API from swig") - add_custom_target(${target_name} ALL DEPENDS - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py) -endfunction(generate_python_api) diff --git a/cmake/system.cmake b/cmake/system.cmake new file mode 100644 index 0000000000000000000000000000000000000000..788db404ebfb6facbaedf2910186f3b1afe775c1 --- /dev/null +++ b/cmake/system.cmake @@ -0,0 +1,53 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +IF(WIN32) + SET(HOST_SYSTEM "win32") +ELSE(WIN32) + IF(APPLE) + EXEC_PROGRAM (sw_vers ARGS -productVersion OUTPUT_VARIABLE MACOSX_VERSION) + STRING(REGEX MATCH "[0-9]+.[0-9]+" VERSION "${MACOSX_VERSION}") + SET(MACOS_VERSION ${VERSION}) + SET(HOST_SYSTEM "macosx") + ELSE(APPLE) + IF(EXISTS "/etc/issue") + FILE(READ "/etc/issue" LINUX_ISSUE) + IF(LINUX_ISSUE MATCHES "CentOS") + SET(HOST_SYSTEM "centos") + ELSEIF(LINUX_ISSUE MATCHES "Debian") + SET(HOST_SYSTEM "debian") + ELSEIF(LINUX_ISSUE MATCHES "Ubuntu") + SET(HOST_SYSTEM "ubuntu") + ENDIF() + ENDIF(EXISTS "/etc/issue") + ENDIF(APPLE) +ENDIF(WIN32) + +# query number of logical cores +CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES) + +MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES) + +MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}") +MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores") + +# external dependencies log output +SET(EXTERNAL_PROJECT_LOG_ARGS + LOG_DOWNLOAD 0 # Wrap download in script to log output + LOG_UPDATE 1 # Wrap update in script to log output + LOG_CONFIGURE 1 # Wrap configure in script to log output + LOG_BUILD 1 # Wrap build in script to log output + LOG_TEST 1 # Wrap test in script to log output + LOG_INSTALL 1 # Wrap install in script to log output +) diff --git a/cmake/util.cmake b/cmake/util.cmake index fcadc2ac72f51e03eb39725c791eeb9bcdc5094f..7da52bb758a3ab28db85f7c6c4d3a99b141e6342 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -24,7 +24,7 @@ function(target_circle_link_libraries TARGET_NAME) list(APPEND libsInArgn ${arg}) endif() endforeach() - if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") list(APPEND LIBS "-undefined dynamic_lookup") endif() list(REVERSE libsInArgn) @@ -81,18 +81,6 @@ function(link_paddle_exe TARGET_NAME) set(METRIC_LIBS "") endif() - if(PADDLE_WITH_INTERNAL) - set(INTERAL_LIBS paddle_internal_gserver paddle_internal_parameter) - target_circle_link_libraries(${TARGET_NAME} - ARCHIVE_START - paddle_internal_gserver - 
paddle_internal_owlqn - ARCHIVE_END - paddle_internal_parameter) - else() - set(INTERAL_LIBS "") - endif() - target_circle_link_libraries(${TARGET_NAME} ARCHIVE_START paddle_gserver @@ -108,25 +96,15 @@ function(link_paddle_exe TARGET_NAME) paddle_proto paddle_cuda ${METRIC_LIBS} - ${PROTOBUF_LIBRARY} - ${LIBGLOG_LIBRARY} - gflags + ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} - ${CMAKE_THREAD_LIBS_INIT} - ${CBLAS_LIBS} - ${ZLIB_LIBRARIES} - ${INTERAL_LIBS} - ${CMAKE_DL_LIBS}) - - if(WITH_RDMA) - target_link_libraries(${TARGET_NAME} - ${RDMA_LD_FLAGS} - ${RDMA_LIBS}) - endif() + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) if(WITH_PYTHON) target_link_libraries(${TARGET_NAME} - ${PYTHON_LIBRARIES}) + ${PYTHON_LIBRARIES} util) endif() if(WITH_GPU) @@ -143,10 +121,7 @@ function(link_paddle_exe TARGET_NAME) endif() endif() - if(NOT WITH_DSO) - target_link_libraries(${TARGET_NAME} - ${WARPCTC_LIBRARY}) - endif() + add_dependencies(${TARGET_NAME} ${external_project_dependencies}) endfunction() # link_paddle_test @@ -157,6 +132,7 @@ function(link_paddle_test TARGET_NAME) link_paddle_exe(${TARGET_NAME}) target_link_libraries(${TARGET_NAME} paddle_test_main + paddle_test_util ${GTEST_LIBRARIES}) endfunction() diff --git a/cmake/version.cmake b/cmake/version.cmake index a0518e07e88a1ff468c301523f888c7d95e15185..ac1583a24c828629c46cb9cf4e965f8da2273732 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -21,4 +21,5 @@ while ("${PADDLE_VERSION}" STREQUAL "") endif() endwhile() +add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION}) message(STATUS "Paddle version is ${PADDLE_VERSION}") diff --git a/demo/traffic_prediction/README b/demo/traffic_prediction/README new file mode 100644 index 0000000000000000000000000000000000000000..4c95188583513c332b7d7cb0a32d59336208e1aa --- /dev/null +++ b/demo/traffic_prediction/README @@ -0,0 +1,7 @@ +run by: +cd ./data +sh get_data.sh +cd .. +sh train.sh +sh predict.sh + diff --git a/demo/traffic_prediction/data/get_data.sh b/demo/traffic_prediction/data/get_data.sh new file mode 100755 index 0000000000000000000000000000000000000000..f2fa548d4709c0361334f117bfb49e18d83c32f4 --- /dev/null +++ b/demo/traffic_prediction/data/get_data.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -x + +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +cd $DIR + +#download the dataset +echo "Downloading traffic data..." +wget http://paddlepaddle.cdn.bcebos.com/demo/traffic/traffic_data.tar.gz + +#extract package +echo "Unzipping..." +tar -zxvf traffic_data.tar.gz + +echo "data/speeds.csv" > train.list +echo "data/speeds.csv" > test.list +echo "data/speeds.csv" > pred.list + +echo "Done." 
diff --git a/demo/traffic_prediction/dataprovider.py b/demo/traffic_prediction/dataprovider.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7883b6950c369ee67c39b80ce1cefbbf9350459
--- /dev/null
+++ b/demo/traffic_prediction/dataprovider.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle.trainer.PyDataProvider2 import *
+import sys
+import numpy as np
+TERM_NUM = 24
+FORECASTING_NUM = 24
+LABEL_VALUE_NUM = 4
+
+
+def initHook(settings, file_list, **kwargs):
+    """
+    The init hook is invoked before processing data. It sets settings.input_types and stores the data meta.
+
+    :param settings: global object. It will be passed to the process routine.
+    :type settings: object
+    :param file_list: the meta file object, which is passed from trainer_config.py but unused in this function.
+    :param kwargs: other unused arguments.
+    """
+    del kwargs  # unused
+
+    settings.pool_size = sys.maxint
+    # Use a time series of past readings as the feature.
+    # A dense_vector is expressed as [float, float, ..., float].
+    settings.input_types = [dense_vector(TERM_NUM)]
+    # The next FORECASTING_NUM fragments are the values to predict.
+    # Every predicted time point has four possible states.
+    for i in range(FORECASTING_NUM):
+        settings.input_types.append(integer_value(LABEL_VALUE_NUM))
+
+
+@provider(
+    init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True)
+def process(settings, file_name):
+    with open(file_name) as f:
+        # Skip the header line of field names.
+        f.next()
+        for row_num, line in enumerate(f):
+            speeds = map(int, line.rstrip('\r\n').split(",")[1:])
+            # Get the max index.
+            end_time = len(speeds)
+            # Scan the series and generate samples.
+            for i in range(TERM_NUM, end_time - FORECASTING_NUM):
+                # For the dense slot.
+                pre_spd = map(float, speeds[i - TERM_NUM:i])
+
+                # The labels to predict are integers starting from 0, so subtract 1 from every raw value.
+                fol_spd = [j - 1 for j in speeds[i:i + FORECASTING_NUM]]
+
+                # A label to predict is missing; abandon the sample.
+                if -1 in fol_spd:
+                    continue
+                yield [pre_spd] + fol_spd
+
+
+def predict_initHook(settings, file_list, **kwargs):
+    settings.pool_size = sys.maxint
+    settings.input_types = [dense_vector(TERM_NUM)]
+
+
+@provider(init_hook=predict_initHook, should_shuffle=False)
+def process_predict(settings, file_name):
+    with open(file_name) as f:
+        # Skip the header line of field names.
+        f.next()
+        for row_num, line in enumerate(f):
+            speeds = map(int, line.rstrip('\r\n').split(","))
+            end_time = len(speeds)
+            pre_spd = map(float, speeds[end_time - TERM_NUM:end_time])
+            yield pre_spd
diff --git a/demo/traffic_prediction/gen_result.py b/demo/traffic_prediction/gen_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da70b30315f863fd3582583e9a29540a09c1e7f
--- /dev/null
+++ b/demo/traffic_prediction/gen_result.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2016 PaddlePaddle Authors, Inc.
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +res = [] +with open('./rank-00000') as f: + for line in f: + pred = map(int, line.strip('\r\n;').split(";")) + #raw prediction range from 0 to 3 + res.append([i + 1 for i in pred]) + +file_name = open('./data/pred.list').read().strip('\r\n') + +FORECASTING_NUM = 24 +header = [ + 'id', + '201604200805', + '201604200810', + '201604200815', + '201604200820', + '201604200825', + '201604200830', + '201604200835', + '201604200840', + '201604200845', + '201604200850', + '201604200855', + '201604200900', + '201604200905', + '201604200910', + '201604200915', + '201604200920', + '201604200925', + '201604200930', + '201604200935', + '201604200940', + '201604200945', + '201604200950', + '201604200955', + '201604201000', +] +################### +## To CSV format ## +################### +with open(file_name) as f: + f.next() + print ','.join(header) + for row_num, line in enumerate(f): + fields = line.rstrip('\r\n').split(',') + linkid = fields[0] + print linkid + ',' + ','.join(map(str, res[row_num])) diff --git a/demo/traffic_prediction/predict.sh b/demo/traffic_prediction/predict.sh new file mode 100755 index 0000000000000000000000000000000000000000..cec35dce11d1c146a9e878ebab81abe904d6136c --- /dev/null +++ b/demo/traffic_prediction/predict.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +cfg=trainer_config.py +# pass choice +model="output/pass-00000" +paddle train \ + --config=$cfg \ + --use_gpu=false \ + --job=test \ + --init_model_path=$model \ + --config_args=is_predict=1 \ + --predict_output_dir=. + +python gen_result.py > result.txt + +rm -rf rank-00000 diff --git a/demo/traffic_prediction/train.sh b/demo/traffic_prediction/train.sh new file mode 100755 index 0000000000000000000000000000000000000000..48dfc5604f80042598c5c779bd450a5808fdfb64 --- /dev/null +++ b/demo/traffic_prediction/train.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+cfg=trainer_config.py
+paddle train \
+  --config=$cfg \
+  --save_dir=./output \
+  --trainer_count=4 \
+  --log_period=1000 \
+  --dot_period=10 \
+  --num_passes=10 \
+  --use_gpu=false \
+  --show_parameter_stats_period=3000 \
+  2>&1 | tee 'train.log'
diff --git a/demo/traffic_prediction/trainer_config.py b/demo/traffic_prediction/trainer_config.py
new file mode 100755
index 0000000000000000000000000000000000000000..52d678624aff7ca2264c3c20e320004217d14397
--- /dev/null
+++ b/demo/traffic_prediction/trainer_config.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from paddle.trainer_config_helpers import *
+
+################################### DATA Configuration #############################################
+is_predict = get_config_arg('is_predict', bool, False)
+trn = './data/train.list' if not is_predict else None
+tst = './data/test.list' if not is_predict else './data/pred.list'
+process = 'process' if not is_predict else 'process_predict'
+define_py_data_sources2(
+    train_list=trn, test_list=tst, module="dataprovider", obj=process)
+################################### Parameter Configuration #######################################
+TERM_NUM = 24
+FORECASTING_NUM = 24
+emb_size = 16
+batch_size = 128 if not is_predict else 1
+settings(
+    batch_size=batch_size,
+    learning_rate=1e-3,
+    learning_method=RMSPropOptimizer())
+################################### Algorithm Configuration ########################################
+
+output_label = []
+
+link_encode = data_layer(name='link_encode', size=TERM_NUM)
+for i in xrange(FORECASTING_NUM):
+    # Every task shares the same weights.
+    link_param = ParamAttr(
+        name='_link_vec.w', initial_max=1.0, initial_min=-1.0)
+    link_vec = fc_layer(input=link_encode, size=emb_size, param_attr=link_param)
+    score = fc_layer(input=link_vec, size=4, act=SoftmaxActivation())
+    if is_predict:
+        maxid = maxid_layer(score)
+        output_label.append(maxid)
+    else:
+        # Multi-task training.
+        label = data_layer(name='label_%dmin' % ((i + 1) * 5), size=4)
+        cls = classification_cost(
+            input=score, name="cost_%dmin" % ((i + 1) * 5), label=label)
+        output_label.append(cls)
+outputs(output_label)
diff --git a/doc/howto/usage/k8s/k8s_aws_en.md b/doc/howto/usage/k8s/k8s_aws_en.md
index 422dc3bd811ae8b31dbdd6fa8637d6e44b29ac76..b04bfba590de42956dfe99256cde325b24adbfab 100644
--- a/doc/howto/usage/k8s/k8s_aws_en.md
+++ b/doc/howto/usage/k8s/k8s_aws_en.md
@@ -4,10 +4,10 @@
 To use AWS, we need to sign up an AWS account on Amazon's Web site.
 An AWS account allows us to log in to the AWS Console Web interface to
-create IAM users and user groups. Usually, we create a user group with
+create IAM users and user groups. Usually, we create a user group with
 privileges required to run PaddlePaddle, and we create users for those
 who are going to run PaddlePaddle and add these users into the
-group. IAM users can identify themselves using password and tokens,
+group. IAM users can identify themselves using passwords and tokens,
 where passwords allow users to log in to the AWS Console, and tokens
 make it easy for users to submit and inspect jobs from the command
 line.
@@ -360,7 +360,7 @@ In one time of distributed training, user will confirm the PaddlePaddle node num
 
 ####Create PaddlePaddle Node
 
-After Kubernetes master gets the request, it will parse the yaml file and create several pods (defined by PaddlePaddle's node number), Kubernetes will allocate these pods onto cluster's node. A pod represents a PaddlePaddle node, when pod is successfully allocated onto one physical/virtual machine, Kubernetes will startup the container in the pod, and this container will use the environment variables in yaml file and start up `paddle pserver` and `paddle trainer` processes.
+After the Kubernetes master gets the request, it parses the yaml file and creates several pods (the number is given by PaddlePaddle's node number); Kubernetes then allocates these pods onto the cluster's nodes. A pod represents a PaddlePaddle node: once a pod is successfully allocated onto one physical/virtual machine, Kubernetes starts the container in the pod, and the container uses the environment variables from the yaml file to start up the `paddle pserver` and `paddle trainer` processes.
 
 ####Start up Training
 
@@ -661,6 +661,6 @@ Sometimes we might need to create or manage the cluster on AWS manually with lim
 ### Some Presumptions
 
 * Instances run on CoreOS, the official AMI.
-* Kubernetes node use instance storage, no EBS get mounted. Etcd is running on additional node.
+* Kubernetes nodes use instance storage; no EBS volumes are mounted. Etcd runs on an additional node.
 * For networking, we use Flannel network at this moment, we will use Calico solution later on.
 * When you create a service with Type=LoadBalancer, Kubernetes will create an ELB, and create a security group for the ELB.
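Before the API hunks that follow: this change also extends the public swig interface, adding Arguments::sumCosts() (backed by paddle::Argument::sumCosts) and Parameter::save()/load(). A short sketch of the new accessor from the Python side, mirroring the updated testArguments.py; the matrix m is assumed to be prepared as in that test:

    from py_paddle import swig_paddle

    args = swig_paddle.Arguments.createArguments(1)
    args.setSlotValue(0, m)  # m: a swig_paddle.Matrix holding the output costs
    total = args.sumCosts()  # sums the costs stored in the output arguments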
diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp index 0cafbd896e2d88aee4406bd0305878ce489bc18d..41beed38a87601cb57072c8966cd0fd2ea156524 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/api/Arguments.cpp @@ -137,6 +137,10 @@ void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) { a.cpuSequenceDims = m->cast(vec->getSharedPtr()); } +float Arguments::sumCosts() const { + return paddle::Argument::sumCosts(m->outputs); +} + int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) { auto& a = m->getArg(idx); return a.getBatchSize(); diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index da6dad10cd807654f9ddd03beeb29cef69fc8de0..6e8fcd114df580a00858d95f0af0d1ec0bd9b4a2 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -1,3 +1,21 @@ +FUNCTION(generate_python_api target_name) + ADD_CUSTOM_COMMAND(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + COMMAND ${SWIG_EXECUTABLE} -python -c++ -outcurrentdir -I../ api/Paddle.swig + && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig + ${PROJ_ROOT}/paddle/api/PaddleAPI.h + ${external_project_dependencies} + WORKING_DIRECTORY ${PROJ_ROOT}/paddle + COMMENT "Generate Python API from swig") + ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${external_project_dependencies}) +ENDFUNCTION(generate_python_api) + set(API_SOURCES Arguments.cpp ConfigParser.cpp @@ -42,7 +60,7 @@ file(GLOB PY_PADDLE_PYTHON_FILES ${PROJ_ROOT}/paddle/py_paddle/*.py) # TODO(yuyang18) : make wheel name calculated by cmake add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp - COMMAND ${PYTHON_EXECUTABLE} setup.py bdist_wheel + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp COMMAND rm -rf py_paddle.egg-info build WORKING_DIRECTORY ${PROJ_ROOT}/paddle @@ -76,5 +94,19 @@ add_dependencies(python_api_wheel python_swig_sources paddle_cuda) if(WITH_TESTING) + IF(NOT PY_PIP_FOUND) + SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) + ExternalProject_Add(pip + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/pypa/pip.git + GIT_TAG 9.0.1 + PREFIX ${PIP_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools python_api_wheel + ) + ENDIF() add_subdirectory(test) endif() diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 81c9eed0bccd5ad63f524cdb011fc73cd568f465..f5af8b0035b44d97832dd90ca2eeba079503715c 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -19,8 +19,8 @@ limitations under the License. */ #include #include #include +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/common.h" /// Import PaddlePaddle's enumeration into global namespace. 
using namespace paddle::enumeration_wrapper; // NOLINT @@ -450,6 +450,8 @@ public: IVector* vec) throw(RangeError); void setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError); + float sumCosts() const; + private: static Arguments* createByPaddleArgumentVector(void* ptr); void* getInternalArgumentsPtr() const; @@ -546,6 +548,10 @@ public: ParameterConfig* getConfig(); void setValueUpdated(); + bool save(const std::string& filename) const; + + bool load(const std::string& filename) const; + size_t getSize() const; private: diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index ddc00d8d1af4c58d7e2233423bea916408bee92b..19f7a898d6b8d3d02c5654559dcb86728266731e 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -57,4 +57,12 @@ size_t Parameter::getID() const { return m->getPtr()->getID(); } void Parameter::setValueUpdated() { m->getPtr()->setValueUpdated(); } +bool Parameter::save(const std::string& filename) const { + return m->getPtr()->save(filename); +} + +bool Parameter::load(const std::string& filename) const { + return m->getPtr()->load(filename); +} + size_t Parameter::getSize() const { return m->getPtr()->getSize(); } diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index c3f739568f50b6ee8b0894d06a4d7f91c7816879..54d67aa62f4d87ad03282962c722019698dc621a 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -15,12 +15,11 @@ limitations under the License. */ #include "PaddleAPI.h" #include "paddle/parameter/Parameter.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Util.h" -#include #include #include #include diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in index 23542b952b7699d66cf64b47d0354e9078ae06d9..e11ee920362aed3ec79a2e62d447d7dde4a99248 100644 --- a/paddle/api/paddle_api_config.py.in +++ b/paddle/api/paddle_api_config.py.in @@ -1,17 +1,17 @@ PADDLE_BUILD_DIR="@CMAKE_CURRENT_BINARY_DIR@/../" WITH_GPU="@WITH_GPU@" -PROTOBUF_LIB="@PROTOBUF_LIBRARY@" -ZLIB_LIB="@ZLIB_LIBRARIES@" +PROTOBUF_LIBRARY="@PROTOBUF_LIBRARY@" +ZLIB_LIBRARIES="@ZLIB_LIBRARIES@" CMAKE_THREAD_LIB="@CMAKE_THREAD_LIBS_INIT@" CMAKE_DL_LIBS="@CMAKE_DL_LIBS@" WITH_PYTHON="@WITH_PYTHON@" PYTHON_LIBRARIES="@PYTHON_LIBRARIES@" -LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@" +GLOG_LIBRARIES="@GLOG_LIBRARIES@" GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@" GFLAGS_LOCATION="@GFLAGS_LOCATION@" -CBLAS_LIBRARIES="@CBLAS_LIBS@" +CBLAS_LIBRARIES="@CBLAS_LIBRARIES@" -CUDA_LIBRARIES="@CUDA_LIBRARIES@" +CUDA_LIBRARIES="@CUDA_cudart_shared_LIBRARY@" WITH_COVERALLS="@ON_COVERALLS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index 152085db847b4cfe4b77cdda49899442f6324ac8..59b0657bd0e3923b174133d79cdc936b1ab814ad 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -40,14 +40,14 @@ try: self.paddle_build_dir = PADDLE_BUILD_DIR self.paddle_build_dir = os.path.abspath(self.paddle_build_dir) self.with_gpu = PaddleLDFlag.cmake_bool(WITH_GPU) - self.protolib = PROTOBUF_LIB - self.zlib = ZLIB_LIB + self.protolib = PROTOBUF_LIBRARY + self.zlib = ZLIB_LIBRARIES self.thread = CMAKE_THREAD_LIB self.dl_libs = CMAKE_DL_LIBS self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON) self.python_libs = PYTHON_LIBRARIES - self.glog_libs = LIBGLOG_LIBRARY + self.glog_libs = GLOG_LIBRARIES self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) self.gflags_location = GFLAGS_LOCATION diff --git 
a/paddle/api/test/.gitignore b/paddle/api/test/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b7948824a1eab119140dd9bea20276c303fe4af1 --- /dev/null +++ b/paddle/api/test/.gitignore @@ -0,0 +1,2 @@ +*.w0 +*.wbias diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt index 08a0fe96a004d38b81d0bac881da1faeb52685f4..a2fa623c80087d42e6a2a5c05f62eba4997f8ec4 100644 --- a/paddle/api/test/CMakeLists.txt +++ b/paddle/api/test/CMakeLists.txt @@ -1,2 +1,2 @@ add_test(NAME test_swig_api - COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh) + COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh ${PYTHON_EXECUTABLE}) diff --git a/paddle/api/test/run_tests.sh b/paddle/api/test/run_tests.sh index 2f12ba026430ba7adb6f4dee11ed17ea3ad3f36d..bcf06afa86aaa1a3151aeb966b54f69657c541e3 100755 --- a/paddle/api/test/run_tests.sh +++ b/paddle/api/test/run_tests.sh @@ -20,11 +20,7 @@ popd > /dev/null cd $SCRIPTPATH -rm -rf .test_env -virtualenv .test_env -source .test_env/bin/activate - -pip --timeout 600 install ../../dist/*.whl +$1 -m pip install ../../dist/*.whl test_list="testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py" @@ -33,7 +29,7 @@ export PYTHONPATH=$PWD/../../../python/ for fn in $test_list do echo "test $fn" - python $fn + $1 $fn if [ $? -ne 0 ]; then exit 1 fi diff --git a/paddle/api/test/testArguments.py b/paddle/api/test/testArguments.py index 8cabecd242fb4eb98c0fe468687ef179245e4535..a04a805d7a64ef906c8388f1241b9ef823e4d9e0 100644 --- a/paddle/api/test/testArguments.py +++ b/paddle/api/test/testArguments.py @@ -22,6 +22,8 @@ class TestArguments(unittest.TestCase): args = swig_paddle.Arguments.createArguments(1) args.setSlotValue(0, m) + self.assertAlmostEqual(27.0, args.sumCosts()) + mat = args.getSlotValue(0) assert isinstance(mat, swig_paddle.Matrix) np_mat = mat.toNumpyMatInplace() diff --git a/paddle/api/test/testGradientMachine.py b/paddle/api/test/testGradientMachine.py index b81eafa9673ca34f1b7e06401098d55bdb1b35a5..4b705f66eccd267f326fe0662a17b33a09fda982 100644 --- a/paddle/api/test/testGradientMachine.py +++ b/paddle/api/test/testGradientMachine.py @@ -45,6 +45,7 @@ class TestGradientMachine(unittest.TestCase): assert isinstance(val, swig_paddle.Vector) arr = numpy.full((len(val), ), 0.1, dtype="float32") val.copyFromNumpyArray(arr) + self.assertTrue(param.save(param.getName())) param_config = param.getConfig().toProto() assert isinstance(param_config, paddle.proto.ParameterConfig_pb2.ParameterConfig) @@ -92,6 +93,9 @@ class TestGradientMachine(unittest.TestCase): self.assertTrue(self.isCalled) + for param in machine.getParameters(): + self.assertTrue(param.load(param.getName())) + def test_train_one_pass(self): conf_file_path = './testTrainConfig.py' trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile( diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index aa1ff4a771c4a1c64be86893e7b2261ae65f0f94..57fb89608f4bcf3e6829fe850a61c2a626adfbdc 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -88,6 +88,8 @@ else() ${CUDA_CXX_SOURCES}) endif() +add_dependencies(paddle_cuda ${external_project_dependencies}) + add_style_check_target(paddle_cuda ${CUDA_SOURCES} ${CUDA_HEADERS} diff --git a/paddle/cuda/include/hl_warpctc_wrap.h b/paddle/cuda/include/hl_warpctc_wrap.h index 79bf6c3db7f876009d98a62b6523588f021886e8..7885ae570148c0b9870089baf22b6bacb786f995 100644 --- a/paddle/cuda/include/hl_warpctc_wrap.h +++ 
b/paddle/cuda/include/hl_warpctc_wrap.h @@ -15,8 +15,8 @@ limitations under the License. */ #ifndef HL_WARPCTC_WRAP_H_ #define HL_WARPCTC_WRAP_H_ +#include "ctc.h" #include "hl_base.h" -#include "warp-ctc/include/ctc.h" typedef ctcStatus_t hl_warpctc_status_t; typedef ctcOptions hl_warpctc_options_t; diff --git a/paddle/cuda/src/hl_warpctc_wrap.cc b/paddle/cuda/src/hl_warpctc_wrap.cc index 9ae8bc0f220e143a5c59d8c3ead012a20369e7b9..55b940ca67acce2c7ee7c1ee286ab96240652274 100644 --- a/paddle/cuda/src/hl_warpctc_wrap.cc +++ b/paddle/cuda/src/hl_warpctc_wrap.cc @@ -29,7 +29,6 @@ void* warpctc_dso_handle = nullptr; * false, you need to add the path of libwarp-ctc.so to * the linked-libs of paddle or to LD_PRELOAD. */ -#ifdef PADDLE_USE_DSO #define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ struct DynLoad__##__name { \ template \ @@ -41,15 +40,6 @@ void* warpctc_dso_handle = nullptr; return reinterpret_cast(p_##_name)(args...); \ } \ } __name; // struct DynLoad__##__name -#else -#define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - return __name(args...); \ - } \ - } __name; // struct DynLoad__##__name -#endif // include all needed warp-ctc functions DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version) diff --git a/paddle/function/BufferArg.cpp b/paddle/function/BufferArg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65c6f303041d830812fb2d99503b2b2166145f4a --- /dev/null +++ b/paddle/function/BufferArg.cpp @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include + +#include "BufferArg.h" + +namespace paddle { + +const SequenceArg& BufferArg::sequence() const { + // CHECK_EQ(bufferType_, TENSOR_SEQUENCE_DATA); + return dynamic_cast(*this); +} + +const SparseMatrixArg& BufferArg::sparse() const { + // CHECK_EQ(bufferType_, TENSOR_SPARSE); + return dynamic_cast(*this); +} + +} // namespace paddle diff --git a/paddle/function/BufferArg.h b/paddle/function/BufferArg.h new file mode 100644 index 0000000000000000000000000000000000000000..9649913fa8d9bf82b67fc2ac97ae9f30e7029528 --- /dev/null +++ b/paddle/function/BufferArg.h @@ -0,0 +1,281 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include + +#include "TensorShape.h" +#include "TensorType.h" +#include "paddle/math/CpuSparseMatrix.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/SparseMatrix.h" + +namespace paddle { + +enum BufferType { + TENSOR_NORMAL = 0, + TENSOR_SEQUENCE_ID = 1, + TENSOR_SEQUENCE_DATA = 2, + TENSOR_SPARSE = 3 +}; + +enum SparseDataType { + SPARSE_NO_VALUE = 0, // do not need value pointer, all values are 1 + SPARSE_FLOAT_VALUE = 1 +}; + +enum SparseDataFormat { SPARSE_CSR_FORMAT = 0, SPARSE_CSC_FORMAT = 1 }; + +class BufferArg; +class SequenceArg; +class SparseMatrixArg; +typedef std::shared_ptr BufferArgPtr; + +/** + * \brief BufferArg used as the argument type of Function. + * + * The arguments of the Paddle Function have four Buffer types. + * 1. BufferArg for a dense Buffer of any dimension. + * 2. SequenceIdArg for a Buffer of sequence start positions. + * 3. SequenceArg for a Buffer of sequence data. + * 4. SparseMatrixArg for a Buffer of sparse matrix. + * + * There is an ArgType property for the BufferArg used as Function Output. + * Whether the result of the Function calculation is assigned to the + * output Buffer or added to the output Buffer is determined by the + * argType_ property of the output BufferArg. + */ + +// ArgType is only used by output BufferArg. +// For input argument, argType_ is ignored. +// For output argument, need to set the argType_ of the BufferArg. +enum ArgType { + UNSPECIFIED = 0, + ASSIGN_TO = 1, + ADD_TO = 2, +}; +class BufferArg { +public: + void setArgType(ArgType argType) { argType_ = argType; } + + ArgType getArgType() const { return argType_; } + +public: + BufferArg(void* buf, + ValueType valueType, + const TensorShape& shape, + ArgType argType = UNSPECIFIED) + : buf_(buf), valueType_(valueType), shape_(shape), argType_(argType) {} + + BufferArg(void* buf, ValueType valueType) + : buf_(buf), valueType_(valueType) {} + + BufferArg(const Matrix& matrix, ArgType argType = UNSPECIFIED) + : buf_( + const_cast(reinterpret_cast(matrix.getData()))), + valueType_(DataType::value), + shape_(2), + argType_(argType) { + shape_.setDim(0, matrix.getHeight()); + shape_.setDim(1, matrix.getWidth()); + } + + BufferArg(const Matrix& matrix, + const TensorShape& shape, + ArgType argType = UNSPECIFIED) + : buf_( + const_cast(reinterpret_cast(matrix.getData()))), + valueType_(DataType::value), + shape_(shape), + argType_(argType) { + CHECK_EQ(matrix.getElementCnt(), shape.getElements()); + } + + BufferArg(const Vector& vector, ArgType argType = UNSPECIFIED) + : buf_( + const_cast(reinterpret_cast(vector.getData()))), + valueType_(DataType::value), + shape_(1), + argType_(argType) { + shape_.setDim(0, vector.getSize()); + } + + BufferArg(const IVector& vector, ArgType argType = UNSPECIFIED) + : buf_( + const_cast(reinterpret_cast(vector.getData()))), + valueType_(VALUE_TYPE_INT32), + shape_(1), + argType_(argType) { + shape_.setDim(0, vector.getSize()); + } + + template + typename Tensor::Matrix matrix() const { + CHECK(buf_); + CHECK(valueType_ == DataType::value); + // CHECK(deviceType_ == DType); + CHECK_EQ((size_t)2, shape_.ndims()); + return typename Tensor::Matrix( + reinterpret_cast(buf_), shape_[0], shape_[1]); + } + + template + typename Tensor::Vector vector() const { + CHECK(buf_); + CHECK(valueType_ == DataType::value); + // CHECK(deviceType_ == DType); + CHECK_EQ((size_t)1, shape_.ndims()); + return typename Tensor::Vector( + shape_[0], reinterpret_cast(buf_)); + } + + virtual ~BufferArg() {} + + template + T* data() const { + 
return reinterpret_cast(buf_); + } + + void* data() const { return buf_; } + ValueType valueType() const { return valueType_; } + BufferType bufferType() const { return bufferType_; } + const TensorShape& shape() const { return shape_; } + + const SequenceArg& sequence() const; + const SparseMatrixArg& sparse() const; + +protected: + void* buf_; + ValueType valueType_; + TensorShape shape_; + BufferType bufferType_; + ArgType argType_ = UNSPECIFIED; + // leading dimensions. The size is dims_.size() + // Dims lds_; +}; + +// sequence start positions in a mini-batch of sequences +// shape_.ndims() == 1 +// valueType_ = int32 +// if a < b then value_.buf_[a] < value_.buf_[b] +class SequenceIdArg : public BufferArg { +public: + SequenceIdArg(void* buf, + const TensorShape& shape, + ArgType argType = UNSPECIFIED) + : BufferArg(buf, VALUE_TYPE_INT32, shape, argType) { + CHECK_EQ(shape_.ndims(), (size_t)1); + numSeqs_ = shape_[0] - 1; + } + + SequenceIdArg(const IVector& vector) : BufferArg(vector) { + numSeqs_ = shape_[0] - 1; + } + + ~SequenceIdArg() {} + + size_t numSeqs() const { return numSeqs_; } + +private: + size_t numSeqs_; +}; + +// sequence data +class SequenceArg : public BufferArg { +public: + SequenceArg(void* buf, + ValueType valueType, + const TensorShape& shape, + const SequenceIdArg& startPositions, + ArgType argType = UNSPECIFIED) + : BufferArg(buf, valueType, shape, argType), + startPositions_(startPositions) {} + + SequenceArg(const Matrix& matrix, + const IVector& vector, + ArgType argType = UNSPECIFIED) + : BufferArg(matrix, argType), startPositions_(vector) {} + + ~SequenceArg() {} + + void* getIdBuf() const { return startPositions_.data(); } + size_t numSeqs() const { return startPositions_.numSeqs(); } + +private: + SequenceIdArg startPositions_; +}; + +// sparse matrix +// valueType_ == float or double +// shape_.ndims() == 2 +class SparseMatrixArg : public BufferArg { +public: + SparseMatrixArg(void* buf, + ValueType valueType, + const TensorShape& shape, + const BufferArg& row, + const BufferArg& col, + size_t nnz, + SparseDataFormat format, + SparseDataType type, + ArgType argType = UNSPECIFIED) + : BufferArg(buf, valueType, shape, argType), + row_(row), + col_(col), + nnz_(nnz), + format_(format), + type_(type) { + CHECK((valueType == VALUE_TYPE_FLOAT) || (valueType == VALUE_TYPE_DOUBLE)); + CHECK_EQ(shape_.ndims(), (size_t)2); + CHECK_EQ(row_.shape().ndims(), (size_t)1); + CHECK_EQ(col_.shape().ndims(), (size_t)1); + if (format == SPARSE_CSR_FORMAT) { + CHECK_EQ(nnz, col.shape()[0]); + } else if (format == SPARSE_CSC_FORMAT) { + CHECK_EQ(nnz, row.shape()[0]); + } + } + + SparseMatrixArg(const CpuSparseMatrix& sparse, ArgType argType = UNSPECIFIED) + : BufferArg(sparse, argType), + row_(reinterpret_cast(sparse.getRows()), VALUE_TYPE_INT32), + col_(reinterpret_cast(sparse.getCols()), VALUE_TYPE_INT32) {} + + SparseMatrixArg(const GpuSparseMatrix& sparse, ArgType argType = UNSPECIFIED) + : BufferArg(sparse, argType), + row_(reinterpret_cast(sparse.getRows()), VALUE_TYPE_INT32), + col_(reinterpret_cast(sparse.getCols()), VALUE_TYPE_INT32) {} + + ~SparseMatrixArg() {} + + void* getRowBuf() const { return row_.data(); } + + void* getColBuf() const { return col_.data(); } + + size_t nnz() const { return nnz_; } + + SparseDataFormat dataFormat() const { return format_; } + + SparseDataType dataType() const { return type_; } + +private: + BufferArg row_; + BufferArg col_; + size_t nnz_; + SparseDataFormat format_; + SparseDataType type_; +}; + +} // namespace paddle diff 
--git a/paddle/function/BufferArgTest.cpp b/paddle/function/BufferArgTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9ee3ab079e339b86a9db8602c41e419df9dc544 --- /dev/null +++ b/paddle/function/BufferArgTest.cpp @@ -0,0 +1,90 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "BufferArg.h" +#include +#include "Function.h" +#include "paddle/math/MemoryHandle.h" + +namespace paddle { + +TEST(BufferTest, BufferArg) { + TensorShape shape({8, 10}); + CpuMemoryHandle memory(shape.getElements() * + sizeOfValuType(VALUE_TYPE_FLOAT)); + BufferArg buffer(memory.getBuf(), VALUE_TYPE_FLOAT, shape); + EXPECT_EQ(buffer.data(), memory.getBuf()); +} + +TEST(BufferTest, SequenceIdArg) { + TensorShape shape({10}); + CpuMemoryHandle memory(shape.getElements() * + sizeOfValuType(VALUE_TYPE_INT32)); + SequenceIdArg buffer(memory.getBuf(), shape); + EXPECT_EQ(buffer.data(), memory.getBuf()); + EXPECT_EQ(buffer.numSeqs(), 9); +} + +TEST(BufferTest, asArgument) { + MatrixPtr matrix = Matrix::create(100, 200); + VectorPtr vector = Vector::create(100, false); + CpuSparseMatrix sparse(200, 300, 50); + + // prepare arguments + BufferArgs argments; + argments.addArg(*matrix); + argments.addArg(*vector); + argments.addArg(sparse); + + // function + auto function = [=](const BufferArgs& inputs) { + EXPECT_EQ(inputs.size(), 3); + + // check inputs[0] + EXPECT_EQ(inputs[0].shape().ndims(), 2); + EXPECT_EQ(inputs[0].shape()[0], 100); + EXPECT_EQ(inputs[0].shape()[1], 200); + EXPECT_EQ(inputs[0].data(), matrix->getData()); + + EXPECT_EQ(inputs[0].matrix().getHeight(), + matrix->getHeight()); + EXPECT_EQ(inputs[0].matrix().getWidth(), + matrix->getWidth()); + EXPECT_EQ(inputs[0].matrix().getData(), matrix->getData()); + + // check inputs[1] + EXPECT_EQ(inputs[1].shape().ndims(), 1); + EXPECT_EQ(inputs[1].shape()[0], 100); + EXPECT_EQ(inputs[1].data(), vector->getData()); + CpuVector inVector = inputs[1].vector(); + EXPECT_EQ(inVector.getSize(), vector->getSize()); + EXPECT_EQ(inVector.getData(), vector->getData()); + + // check inputs[2] + EXPECT_EQ(inputs[2].shape().ndims(), 2); + EXPECT_EQ(inputs[2].shape()[0], 200); + EXPECT_EQ(inputs[2].shape()[1], 300); + EXPECT_EQ(inputs[2].data(), sparse.getData()); + // CHECK_EQ(inputs[2].sparse().nnz(), 50); + // CHECK_EQ(inputs[2].sparse().dataFormat(), SPARSE_CSR_FORMAT); + // CHECK_EQ(inputs[2].sparse().dataType(), SPARSE_FLOAT_VALUE); + EXPECT_EQ(inputs[2].sparse().getRowBuf(), sparse.getRows()); + EXPECT_EQ(inputs[2].sparse().getColBuf(), sparse.getCols()); + }; + + // call function + function(argments); +} + +} // namespace paddle diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index 0b3126155d0c0872a70fc83260d4ea34161cb717..75a2acc55ec3d33687f96d2b0398e52b69e8680d 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -3,6 +3,7 @@ file(GLOB cpp_files . 
*Op.cpp) list(APPEND h_files Function.h) list(APPEND cpp_files Function.cpp) +list(APPEND cpp_files BufferArg.cpp) if(WITH_GPU) file(GLOB cu_files . *OpGpu.cu) @@ -10,16 +11,20 @@ if(WITH_GPU) endif() add_library(paddle_function STATIC ${cpp_files} ${cu_objs}) +add_dependencies(paddle_function ${external_project_dependencies}) + if(WITH_GPU) if(WITH_TESTING) # TODO: # file(GLOB test_files . *OpTest.cpp) # add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files}) - add_simple_unittest(CrossMapNormalOpTest) - add_unittest(ContextProjectionOpTest - ContextProjectionOpTest.cpp - ../gserver/tests/TestUtil.cpp) + # add_simple_unittest(CrossMapNormalOpTest) + add_simple_unittest(TensorShapeTest) + add_simple_unittest(TensorTypeTest) + add_simple_unittest(BufferArgTest) + add_simple_unittest(FunctionTest) + # add_simple_unittest(ContextProjectionOpTest) endif() endif() diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp index bd367a859e10c0522206cd0215970922905905ed..cb448562ebb37022f727ee65024f06f69d63e9cb 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/function/ContextProjectionOp.cpp @@ -19,17 +19,15 @@ limitations under the License. */ namespace paddle { template <> -void ContextProjectionForward(CpuMatrix* out_mat, - const CpuMatrix* input_mat, - const CpuMatrix* weight_mat, +void ContextProjectionForward(CpuMatrix& out_mat, + const CpuMatrix& input_mat, + const CpuMatrix& weight_mat, const CpuIVector& seq_vec, size_t context_length, int context_start, size_t begin_pad) { const int* starts = seq_vec.getData(); const size_t num_sequences = seq_vec.getSize() - 1; - auto w_mat = const_cast(weight_mat); - auto in_mat = const_cast(input_mat); for (size_t i = 0; i < num_sequences; ++i) { for (size_t j = 0; j < context_length; ++j) { int begin = starts[i] + context_start + j; @@ -39,10 +37,11 @@ void ContextProjectionForward(CpuMatrix* out_mat, if (begin < starts[i]) { int64_t pad_size = std::min(starts[i] - begin, starts[i + 1] - starts[i]); - MatrixPtr mat = out_mat->subMatrix(starts[i], pad_size); - if (w_mat) { - MatrixPtr sub = w_mat->subMatrix(j, pad_size); - mat->addAtOffset(*sub, j * in_mat->getWidth()); + MatrixPtr mat = out_mat.subMatrix(starts[i], pad_size); + if (weight_mat) { + MatrixPtr sub = + const_cast(weight_mat).subMatrix(j, pad_size); + mat->addAtOffset(*sub, j * input_mat.getWidth()); } dst_begin = starts[i] + pad_size; begin = starts[i]; @@ -50,19 +49,22 @@ void ContextProjectionForward(CpuMatrix* out_mat, if (end > starts[i + 1]) { int64_t pad_size = std::min(end - starts[i + 1], starts[i + 1] - starts[i]); - MatrixPtr mat = out_mat->subMatrix(starts[i + 1] - pad_size, pad_size); - if (w_mat) { - MatrixPtr sub = w_mat->subMatrix( - begin_pad + context_start + j - pad_size, pad_size); - mat->addAtOffset(*sub, j * in_mat->getWidth()); + MatrixPtr mat = out_mat.subMatrix(starts[i + 1] - pad_size, pad_size); + if (weight_mat) { + MatrixPtr sub = + const_cast(weight_mat) + .subMatrix(begin_pad + context_start + j - pad_size, + pad_size); + mat->addAtOffset(*sub, j * input_mat.getWidth()); } dst_end = starts[i + 1] - pad_size; end = starts[i + 1]; } if (end <= begin) continue; - MatrixPtr src = in_mat->subMatrix(begin, end - begin); - MatrixPtr dst = out_mat->subMatrix(dst_begin, dst_end - dst_begin); - dst->addAtOffset(*src, j * in_mat->getWidth()); + MatrixPtr src = + const_cast(input_mat).subMatrix(begin, end - begin); + MatrixPtr dst = out_mat.subMatrix(dst_begin, dst_end - dst_begin); + dst->addAtOffset(*src, j * 
input_mat.getWidth()); } } } @@ -82,40 +84,32 @@ public: begin_pad_ = config.get("begin_pad"); } - void calc(const Arguments& inputs, - const Arguments& outputs, - const Arguments& inouts) override { - CHECK_EQ(3, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ((size_t)3, inputs.size()); + CHECK_EQ((size_t)1, outputs.size()); - CHECK(outputs[0].getData() && inputs[0].getData() && inputs[2].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 2); - CHECK_EQ(inputs[2].dims_.size(), 1); + CHECK(outputs[0].data() && inputs[0].data() && inputs[2].data()); + CHECK_EQ(outputs[0].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[0].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[1].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[2].shape().ndims(), (size_t)1); /// dim of output = dim of input * context_length - CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); + CHECK_EQ(outputs[0].shape()[1], inputs[0].shape()[1] * context_length_); /// dim of input == dim of weight - CHECK_EQ(inputs[0].dims_[1], inputs[1].dims_[1]); + CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]); /// input and output has the same batch_size - CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]); - - auto out_mat = std::make_shared::type>( - outputs[0].getData(), outputs[0].dims_[0], outputs[0].dims_[1]); - const auto in_mat = std::make_shared::type>( - inputs[0].getData(), inputs[0].dims_[0], inputs[0].dims_[1]); - const auto w_mat = - !inputs[1].getData() - ? nullptr - : std::make_shared::type>( - inputs[1].getData(), inputs[1].dims_[0], inputs[1].dims_[1]); - typename SequenceT::type seq_vec( - inputs[2].dims_[0], reinterpret_cast(inputs[2].getData())); - - ContextProjectionForward(out_mat.get(), - in_mat.get(), - w_mat.get(), + CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]); + + CHECK_EQ(outputs[0].getArgType(), ADD_TO); + auto out_mat = outputs[0].matrix(); + auto in_mat = inputs[0].matrix(); + auto w_mat = !inputs[1].data() + ? typename Tensor::Matrix(nullptr, 0, 0) + : inputs[1].matrix(); + auto seq_vec = inputs[2].vector(); + ContextProjectionForward(out_mat, + in_mat, + w_mat, seq_vec, context_length_, context_start_, @@ -129,18 +123,17 @@ private: }; template <> -void ContextProjectionBackward(CpuMatrix* out_grad_mat, - CpuMatrix* in_grad_mat, - CpuMatrix* w_grad_mat, +void ContextProjectionBackward(CpuMatrix& out_grad_mat, + CpuMatrix& in_grad_mat, + CpuMatrix& w_grad_mat, const CpuIVector& seq_vec, size_t context_length, int context_start, size_t begin_pad, bool is_padding, size_t total_pad) { - CHECK(out_grad_mat); - size_t input_dim = in_grad_mat ? in_grad_mat->getWidth() - : w_grad_mat ? w_grad_mat->getWidth() : 0; + size_t input_dim = in_grad_mat ? in_grad_mat.getWidth() + : w_grad_mat ? 
w_grad_mat.getWidth() : 0; const int* starts = seq_vec.getData(); size_t num_sequences = seq_vec.getSize() - 1; for (size_t i = 0; i < num_sequences; ++i) { @@ -153,8 +146,8 @@ void ContextProjectionBackward(CpuMatrix* out_grad_mat, int64_t pad_size = std::min(starts[i] - begin, starts[i + 1] - starts[i]); if (is_padding && w_grad_mat) { - MatrixPtr mat = out_grad_mat->subMatrix(starts[i], pad_size); - MatrixPtr sub = w_grad_mat->subMatrix(j, pad_size); + MatrixPtr mat = out_grad_mat.subMatrix(starts[i], pad_size); + MatrixPtr sub = w_grad_mat.subMatrix(j, pad_size); sub->addAtOffset(*mat, j * input_dim); } dst_begin = starts[i] + pad_size; @@ -165,8 +158,8 @@ void ContextProjectionBackward(CpuMatrix* out_grad_mat, std::min(end - starts[i + 1], starts[i + 1] - starts[i]); if (is_padding && w_grad_mat) { MatrixPtr mat = - out_grad_mat->subMatrix(starts[i + 1] - pad_size, pad_size); - MatrixPtr sub = w_grad_mat->subMatrix( + out_grad_mat.subMatrix(starts[i + 1] - pad_size, pad_size); + MatrixPtr sub = w_grad_mat.subMatrix( begin_pad + context_start + j - pad_size, pad_size); sub->addAtOffset(*mat, j * input_dim); } @@ -175,8 +168,8 @@ void ContextProjectionBackward(CpuMatrix* out_grad_mat, } if (end <= begin) continue; if (!in_grad_mat) continue; - MatrixPtr src = in_grad_mat->subMatrix(begin, end - begin); - MatrixPtr dst = out_grad_mat->subMatrix(dst_begin, dst_end - dst_begin); + MatrixPtr src = in_grad_mat.subMatrix(begin, end - begin); + MatrixPtr dst = out_grad_mat.subMatrix(dst_begin, dst_end - dst_begin); src->addAtOffset(*dst, j * input_dim); } } @@ -199,44 +192,36 @@ public: total_pad_ = config.get("total_pad"); } - void calc(const Arguments& inputs, - const Arguments& outputs, - const Arguments& inouts) override { - CHECK_EQ(3, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ((size_t)3, inputs.size()); + CHECK_EQ((size_t)1, outputs.size()); - CHECK(outputs[0].getData() && inputs[2].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 2); - CHECK_EQ(inputs[2].dims_.size(), 1); + CHECK(outputs[0].data() && inputs[2].data()); + CHECK_EQ(outputs[0].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[0].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[1].shape().ndims(), (size_t)2); + CHECK_EQ(inputs[2].shape().ndims(), (size_t)1); /// dim of input == dim of weight - CHECK_EQ(inputs[0].dims_[1], inputs[1].dims_[1]); + CHECK_EQ(inputs[0].shape()[1], inputs[1].shape()[1]); /// input and output has the same batch_size - CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]); + CHECK_EQ(inputs[0].shape()[0], outputs[0].shape()[0]); /// dim of output = dim of input * context_length - CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); + CHECK_EQ(outputs[0].shape()[1], inputs[0].shape()[1] * context_length_); - auto out_grad_mat = std::make_shared::type>( - outputs[0].getData(), outputs[0].dims_[0], outputs[0].dims_[1]); - auto in_grad_mat = - !inputs[0].getData() - ? nullptr - : std::make_shared::type>( - inputs[0].getData(), inputs[0].dims_[0], inputs[0].dims_[1]); - auto w_grad_mat = - !inputs[1].getData() - ? 
nullptr - : std::make_shared::type>( - inputs[1].getData(), inputs[1].dims_[0], inputs[1].dims_[1]); - typename SequenceT::type seq_vec( - inputs[2].dims_[0], reinterpret_cast(inputs[2].getData())); + CHECK_EQ(outputs[0].getArgType(), ADD_TO); - ContextProjectionBackward(out_grad_mat.get(), - in_grad_mat ? in_grad_mat.get() : nullptr, - w_grad_mat ? w_grad_mat.get() : nullptr, + auto out_grad_mat = outputs[0].matrix(); + auto in_grad_mat = + !inputs[0].data() ? typename Tensor::Matrix(nullptr, 0, 0) + : inputs[0].matrix(); + auto w_grad_mat = !inputs[1].data() + ? typename Tensor::Matrix(nullptr, 0, 0) + : inputs[1].matrix(); + auto seq_vec = inputs[2].vector(); + ContextProjectionBackward(out_grad_mat, + in_grad_mat, + w_grad_mat, seq_vec, context_length_, context_start_, @@ -253,6 +238,7 @@ private: size_t total_pad_; }; +#if 0 /** * \param inputs[0] input grad. * \param inputs[1] input sequence. @@ -269,13 +255,13 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(2, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(2, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 1); CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); /// input and output has the same batch_size CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]); @@ -317,14 +303,14 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(2, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(2, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 1); CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); auto out_grad_mat = std::make_shared::type>( @@ -349,6 +335,7 @@ private: size_t begin_pad_; size_t total_pad_; }; +#endif REGISTER_TYPED_FUNC(ContextProjectionForward, CPU, @@ -363,6 +350,7 @@ REGISTER_TYPED_FUNC(ContextProjectionForward, REGISTER_TYPED_FUNC(ContextProjectionBackward, GPU, ContextProjectionBackwardFunc); +#if 0 REGISTER_TYPED_FUNC(ContextProjectionBackwardData, GPU, ContextProjectionBackwardDataFunc); @@ -370,4 +358,5 @@ REGISTER_TYPED_FUNC(ContextProjectionBackwardWeight, GPU, ContextProjectionBackwardWeightFunc); #endif +#endif } // namespace paddle diff --git a/paddle/function/ContextProjectionOp.h b/paddle/function/ContextProjectionOp.h index 93eb050fde35f474750f3c2efa72b7471f654b75..a558df5e072f2f4dcc5c45afa385b3cf88872d26 100644 --- a/paddle/function/ContextProjectionOp.h +++ b/paddle/function/ContextProjectionOp.h @@ -31,14 +31,15 @@ namespace paddle { * \param[in] is_padding whether padding 0 or not. 
* */ -template -void ContextProjectionForward(typename MatrixT::type* output, - const typename MatrixT::type* input, - const typename MatrixT::type* weight, - const typename SequenceT::type& sequence, - size_t context_length, - int context_start, - size_t begin_pad); +template +void ContextProjectionForward( + typename Tensor::Matrix& output, + const typename Tensor::Matrix& input, + const typename Tensor::Matrix& weight, + const typename Tensor::Vector& sequence, + size_t context_length, + int context_start, + size_t begin_pad); /** * \brief Context Projection Backward. @@ -53,30 +54,31 @@ void ContextProjectionForward(typename MatrixT::type* output, * \param[in] is_padding whether padding 0 or not. * */ -template -void ContextProjectionBackward(typename MatrixT::type* out_grad, - typename MatrixT::type* in_grad, - typename MatrixT::type* w_grad, - const typename SequenceT::type& seq_vec, - size_t context_length, - int context_start, - size_t begin_pad, - bool is_padding, - size_t total_pad); +template +void ContextProjectionBackward( + typename Tensor::Matrix& out_grad, + typename Tensor::Matrix& in_grad, + typename Tensor::Matrix& w_grad, + const typename Tensor::Vector& seq_vec, + size_t context_length, + int context_start, + size_t begin_pad, + bool is_padding, + size_t total_pad); -template +template void ContextProjectionBackwardData( - typename MatrixT::type* out_grad, - typename MatrixT::type* in_grad, - const typename SequenceT::type& sequence, + typename Tensor::Matrix& out_grad, + typename Tensor::Matrix& in_grad, + const typename Tensor::Vector& sequence, size_t context_length, int context_start); -template +template void ContextProjectionBackwardWeight( - typename MatrixT::type* out_grad, - typename MatrixT::type* w_grad, - const typename SequenceT::type& seq_vec, + typename Tensor::Matrix& out_grad, + typename Tensor::Matrix& w_grad, + const typename Tensor::Vector& seq_vec, size_t context_length, int context_start, size_t total_pad, diff --git a/paddle/function/ContextProjectionOpGpu.cu b/paddle/function/ContextProjectionOpGpu.cu index 1ec7058f96c8200728e5add051d5fa6a77a97e36..6a4a01a6510416fc1f945305203f55ece7a28f11 100644 --- a/paddle/function/ContextProjectionOpGpu.cu +++ b/paddle/function/ContextProjectionOpGpu.cu @@ -120,20 +120,19 @@ void hl_context_projection_forward(const real* input, } template <> -void ContextProjectionForward(GpuMatrix* output, - const GpuMatrix* input, - const GpuMatrix* weight, +void ContextProjectionForward(GpuMatrix& output, + const GpuMatrix& input, + const GpuMatrix& weight, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad) { - CHECK(input && output); - hl_context_projection_forward(input->getData(), + hl_context_projection_forward(input.getData(), sequence.getData(), - weight ? weight->getData() : nullptr, - output->getData(), + weight ? 
weight.getData() : nullptr, + output.getData(), sequence.getSize() - 1, - input->getWidth(), + input.getWidth(), context_length, context_start, begin_pad); @@ -217,17 +216,16 @@ void hl_context_projection_backward_data(real* out_grad, } template <> -void ContextProjectionBackwardData(GpuMatrix* out_grad, - GpuMatrix* in_grad, +void ContextProjectionBackwardData(GpuMatrix& out_grad, + GpuMatrix& in_grad, const GpuIVector& sequence, size_t context_length, int context_start) { - CHECK(in_grad && out_grad); - hl_context_projection_backward_data(out_grad->getData(), + hl_context_projection_backward_data(out_grad.getData(), sequence.getData(), - in_grad->getData(), + in_grad.getData(), sequence.getSize() - 1, - in_grad->getWidth(), + in_grad.getWidth(), context_length, context_start); } @@ -348,19 +346,18 @@ void hl_context_projection_backward_weight(real* out_grad, template <> void ContextProjectionBackwardWeight( - GpuMatrix* out_grad, - GpuMatrix* w_grad, + GpuMatrix& out_grad, + GpuMatrix& w_grad, const GpuIVector& seq_vec, size_t context_length, int context_start, size_t total_pad, size_t begin_pad) { - CHECK(out_grad && w_grad); - hl_context_projection_backward_weight(out_grad->getData(), + hl_context_projection_backward_weight(out_grad.getData(), seq_vec.getData(), - w_grad->getData(), + w_grad.getData(), seq_vec.getSize() - 1, - w_grad->getWidth(), + w_grad.getWidth(), total_pad, context_length, context_start, @@ -368,16 +365,15 @@ void ContextProjectionBackwardWeight( } template <> -void ContextProjectionBackward(GpuMatrix* out_grad, - GpuMatrix* in_grad, - GpuMatrix* w_grad, +void ContextProjectionBackward(GpuMatrix& out_grad, + GpuMatrix& in_grad, + GpuMatrix& w_grad, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad, bool is_padding, size_t total_pad) { - CHECK(out_grad); if (in_grad) { ContextProjectionBackwardData( out_grad, diff --git a/paddle/function/ContextProjectionOpTest.cpp b/paddle/function/ContextProjectionOpTest.cpp index 359428fc03d698145cb880bd735c908838f96f56..6223d2fd23ac3bbb4fbcf51d37d22feaf3b1330b 100644 --- a/paddle/function/ContextProjectionOpTest.cpp +++ b/paddle/function/ContextProjectionOpTest.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include #include "FunctionTest.h" -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/function/CrossMapNormalOp.cpp index f13eb78d27d900064f8cf0dc4194d1e34ded2b14..92980c503fdaaaa9ac600070197dba6ba4bfb7a4 100644 --- a/paddle/function/CrossMapNormalOp.cpp +++ b/paddle/function/CrossMapNormalOp.cpp @@ -112,6 +112,8 @@ void CrossMapNormalGrad(real* inputsGrad, } /** + * \brief {o_0, o_1} = calc(i_0) + * * \param inputs[0] input value. * \param outputs[0] output value. * \param outputs[1] denoms. 
@@ -125,27 +127,24 @@ public: pow_ = config.get("pow"); } - void calc(const Arguments& inputs, - const Arguments& outputs, - const Arguments& inouts) override { - CHECK_EQ(1, inputs.size()); - CHECK_EQ(2, outputs.size()); - CHECK_EQ(0, inouts.size()); - - CHECK_EQ(inputs[0].dims_.size(), 4); - for (size_t i = 0; i < inputs[0].dims_.size(); i++) { - CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]); - CHECK_EQ(inputs[0].dims_[i], outputs[1].dims_[i]); - } + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ((size_t)1, inputs.size()); + CHECK_EQ((size_t)2, outputs.size()); + + CHECK_EQ(inputs[0].shape().ndims(), (size_t)4); + CHECK(inputs[0].shape() == outputs[0].shape()); + CHECK(inputs[0].shape() == outputs[1].shape()); - size_t samples = inputs[0].dims_[0]; - size_t channels = inputs[0].dims_[1]; - size_t height = inputs[0].dims_[2]; - size_t width = inputs[0].dims_[3]; + CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO); + CHECK_EQ(outputs[1].getArgType(), ASSIGN_TO); + size_t samples = inputs[0].shape()[0]; + size_t channels = inputs[0].shape()[1]; + size_t height = inputs[0].shape()[2]; + size_t width = inputs[0].shape()[3]; - CrossMapNormal(outputs[0].getData(), - outputs[1].getData(), - inputs[0].getData(), + CrossMapNormal(outputs[0].data(), + outputs[1].data(), + inputs[0].data(), samples, channels, height, @@ -162,6 +161,8 @@ private: }; /** + * \brief {o_0} = calc(i_0, i_1, i_2, i_3) + * * \param inputs[0] input value. * \param inputs[1] output value. * \param inputs[2] output grad. @@ -177,31 +178,29 @@ public: pow_ = config.get("pow"); } - void calc(const Arguments& inputs, - const Arguments& outputs, - const Arguments& inouts) override { - CHECK_EQ(4, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); - - CHECK_EQ(inputs[0].dims_.size(), 4); - for (size_t i = 0; i < inputs[0].dims_.size(); i++) { - CHECK_EQ(inputs[0].dims_[i], inputs[1].dims_[i]); - CHECK_EQ(inputs[0].dims_[i], inputs[2].dims_[i]); - CHECK_EQ(inputs[0].dims_[i], inputs[3].dims_[i]); - CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]); - } - - size_t samples = inputs[0].dims_[0]; - size_t channels = inputs[0].dims_[1]; - size_t height = inputs[0].dims_[2]; - size_t width = inputs[0].dims_[3]; - - CrossMapNormalGrad(outputs[0].getData(), - inputs[0].getData(), - inputs[1].getData(), - inputs[2].getData(), - inputs[3].getData(), + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ((size_t)4, inputs.size()); + CHECK_EQ((size_t)1, outputs.size()); + + CHECK_EQ(inputs[0].shape().ndims(), (size_t)4); + CHECK(inputs[0].shape() == inputs[1].shape()); + CHECK(inputs[0].shape() == inputs[2].shape()); + CHECK(inputs[0].shape() == inputs[3].shape()); + CHECK(inputs[0].shape() == outputs[0].shape()); + + // TODO(hedaoyuan): need support ASSIGN_TO mode. 
+    CHECK_EQ(outputs[0].getArgType(), ADD_TO);
+
+    size_t samples = inputs[0].shape()[0];
+    size_t channels = inputs[0].shape()[1];
+    size_t height = inputs[0].shape()[2];
+    size_t width = inputs[0].shape()[3];
+
+    CrossMapNormalGrad<Device>(outputs[0].data<real>(),
+                               inputs[0].data<real>(),
+                               inputs[1].data<real>(),
+                               inputs[2].data<real>(),
+                               inputs[3].data<real>(),
                                samples,
                                channels,
                                height,
diff --git a/paddle/function/Function.cpp b/paddle/function/Function.cpp
index 6f82a8d053bc203eed44bd0d8d4c47d23a15268d..dbe3a4e9f608df6333a5637f2d962a555b04d7c3 100644
--- a/paddle/function/Function.cpp
+++ b/paddle/function/Function.cpp
@@ -46,32 +46,50 @@ bool FuncConfig::get(const std::string& key) const {

 template <>
 FuncConfig& FuncConfig::set(const std::string& key, size_t v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].s = v;
   return *this;
 }

 template <>
 FuncConfig& FuncConfig::set(const std::string& key, real v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].r = v;
   return *this;
 }

 template <>
 FuncConfig& FuncConfig::set(const std::string& key, int v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].i = v;
   return *this;
 }

 template <>
 FuncConfig& FuncConfig::set(const std::string& key, bool v) {
-  CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key;
+  CHECK_EQ(static_cast<int>(valueMap_.count(key)), 0) << "Duplicated value: "
+                                                      << key;
   valueMap_[key].b = v;
   return *this;
 }

+void BufferArgs::addArg(const Matrix& arg,
+                        const TensorShape& shape,
+                        ArgType argType) {
+  args_.push_back(std::make_shared<BufferArg>(arg, shape, argType));
+}
+
+void BufferArgs::addArg(const CpuSparseMatrix& arg, ArgType argType) {
+  args_.push_back(std::make_shared<SparseMatrixArg>(arg, argType));
+}
+
+void BufferArgs::addArg(const GpuSparseMatrix& arg, ArgType argType) {
+  args_.push_back(std::make_shared<SparseMatrixArg>(arg, argType));
+}
+
 ClassRegistrar<FunctionBase> FunctionBase::funcRegistrar_;

 }  // namespace paddle
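For reference, the FuncConfig::set specializations above implement a small typed key/value store consumed by Function::init. An illustrative sketch (the key names here are made up, not from the patch):

FuncConfig config;
config.set("size", (size_t)5)   // size_t overload, stored in value.s
    .set("scale", (real)1.5)    // real overload, stored in value.r
    .set("is_padding", true);   // bool overload, stored in value.b
// Typed retrieval; CrossMapNormalFunc::init above reads "pow" the same way.
real scale = config.get<real>("scale");
// Setting the same key twice trips the CHECK_EQ duplicate-value guard.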
diff --git a/paddle/function/Function.h b/paddle/function/Function.h
index 9e8cbb8e48c30e80c5057fc53c050b67d3957188..249f8f9cfad58bf596e8cdce9188409b5690f969 100644
--- a/paddle/function/Function.h
+++ b/paddle/function/Function.h
@@ -16,57 +16,17 @@ limitations under the License. */

 #include <map>
 #include <vector>
+#include "BufferArg.h"
 #include "paddle/math/Matrix.h"
 #include "paddle/utils/ClassRegistrar.h"

 namespace paddle {

-enum DeviceType {
-  DEVICE_TYPE_UNSPECIFIED = 0,
-  DEVICE_TYPE_CPU = 1,
-  DEVICE_TYPE_GPU = 2,
-};
-
-template <DeviceType Device>
-struct MatrixT;
-
-template <>
-struct MatrixT<DEVICE_TYPE_CPU> {
-  using type = CpuMatrix;
-};
-
-template <>
-struct MatrixT<DEVICE_TYPE_GPU> {
-  using type = GpuMatrix;
-};
-
-template <DeviceType Device>
-struct SequenceT;
-
-template <>
-struct SequenceT<DEVICE_TYPE_CPU> {
-  using type = CpuIVector;
-};
-
-template <>
-struct SequenceT<DEVICE_TYPE_GPU> {
-  using type = GpuIVector;
-};
-
-typedef std::vector<size_t> Dims;
-
-class Tensor {
-public:
-  Tensor(real* data, const Dims& dim) : buf_(data), dims_(dim) {}
-
-  real* getData() const { return buf_; }
-
-  real* buf_;
-  Dims dims_;
-};
-
-typedef std::vector<Tensor> Arguments;
-
+/**
+ * Function Configuration.
+ * The argument type of Function::init.
+ * A follow-up will consider moving this data structure into a Proto.
+ */
 class FuncConfig {
 public:
   union value {
@@ -86,15 +46,70 @@ protected:
   std::map<std::string, value> valueMap_;
 };

+/**
+ * Argument type for Function::calc().
+ * A BufferArgs contains a set of BufferArg,
+ * because a Function can have multiple inputs and outputs.
+ */
+class BufferArgs {
+public:
+  BufferArgs() {}
+  size_t size() const { return args_.size(); }
+
+  // Add an argument into BufferArgs.
+  // Tensor can be Matrix, Vector, or IVector.
+  // Inputs do not need an argType.
+  // Outputs must specify argType as either ASSIGN_TO or ADD_TO.
+  template <typename Tensor>
+  void addArg(const Tensor& arg, ArgType argType = UNSPECIFIED) {
+    args_.push_back(std::make_shared<BufferArg>(arg, argType));
+  }
+
+  // Add an arg into BufferArgs and reshape it.
+  //
+  // For example, arg may represent an image buffer,
+  // but Matrix can only represent a two-dimensional Tensor,
+  // so an extra argument is needed to describe the shape of the image buffer.
+  void addArg(const Matrix& arg,
+              const TensorShape& shape,
+              ArgType argType = UNSPECIFIED);
+
+  void addArg(const CpuSparseMatrix& arg, ArgType argType = UNSPECIFIED);
+  void addArg(const GpuSparseMatrix& arg, ArgType argType = UNSPECIFIED);
+
+  // get argument
+  const BufferArg& operator[](size_t num) const {
+    CHECK_LT(num, args_.size());
+    return *args_[num];
+  }
+
+private:
+  std::vector<BufferArgPtr> args_;
+};
+
+/**
+ * \brief Base class for Function.
+ * A basic Function implementation overrides the init and calc interfaces.
+ *
+ * Function inputs are read-only; Function outputs have two modes: ASSIGN_TO
+ * and ADD_TO.
+ * If output.getArgType() == ASSIGN_TO, this is assign mode: the calculation
+ * result of the Function is assigned to the output BufferArg.
+ * If output.getArgType() == ADD_TO, this is add mode: the calculation
+ * result of the Function is added to the output BufferArg.
+ *
+ * For example:
+ * ASSIGN_TO: output = Function(inputs)
+ * ADD_TO:    output += Function(inputs)
+ * If a Function has more than one output, each output can have a different
+ * mode.
+ */
 class FunctionBase {
 public:
   virtual ~FunctionBase() {}

   virtual void init(const FuncConfig& config) {}

-  virtual void calc(const Arguments& inputs,
-                    const Arguments& outputs,
-                    const Arguments& inouts) {}
+  virtual void calc(const BufferArgs& inputs, const BufferArgs& outputs) {}

   static ClassRegistrar<FunctionBase> funcRegistrar_;
 };
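A sketch of how layer code drives the new calc() interface, mirroring the NormProjectionLayer and ContextProjection call sites changed later in this diff (the sizes and accessor names follow those call sites and are illustrative here):

BufferArgs inputs;
BufferArgs outputs;
// Describe a 2-D Matrix as a 4-D image buffer via the shape overload.
TensorShape shape({batchSize, channels, imgSizeH, imgSizeW});
inputs.addArg(*getInputValue(0), shape);              // inputs carry no ArgType
outputs.addArg(*getOutputValue(), shape, ASSIGN_TO);  // output = F(inputs)
// With ADD_TO the Function would accumulate instead:    output += F(inputs)
forward_[0]->calc(inputs, outputs);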
diff --git a/paddle/function/FunctionTest.cpp b/paddle/function/FunctionTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7ce908320a6f6f764e8fdacc96432aca78d7b2df
--- /dev/null
+++ b/paddle/function/FunctionTest.cpp
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "Function.h"
+#include <gtest/gtest.h>
+
+namespace paddle {
+
+template <DeviceType DType>
+void FunctionApi(typename Tensor<real, DType>::Matrix& output,
+                 const typename Tensor<real, DType>::Matrix& input);
+
+template <>
+void FunctionApi<DEVICE_TYPE_CPU>(CpuMatrix& output, const CpuMatrix& input) {
+  EXPECT_EQ(output.getHeight(), 100);
+  EXPECT_EQ(output.getWidth(), 200);
+}
+
+template <>
+void FunctionApi<DEVICE_TYPE_GPU>(GpuMatrix& output, const GpuMatrix& input) {
+  EXPECT_EQ(output.getHeight(), 10);
+  EXPECT_EQ(output.getWidth(), 20);
+}
+
+template <DeviceType DType>
+void Function(const BufferArgs& arguments) {
+  const auto input = arguments[0].matrix<DType>();
+  auto output = arguments[1].matrix<DType>();
+  FunctionApi<DType>(output, input);
+}
+
+TEST(Function, BufferArgs) {
+  CpuMatrix cpuInput = CpuMatrix(100, 200);
+  CpuMatrix cpuOutput = CpuMatrix(100, 200);
+  BufferArgs cpuArgments;
+  cpuArgments.addArg(cpuInput);
+  cpuArgments.addArg(cpuOutput);
+  Function<DEVICE_TYPE_CPU>(cpuArgments);
+
+  GpuMatrix gpuInput = GpuMatrix(10, 20);
+  GpuMatrix gpuOutput = GpuMatrix(10, 20);
+  BufferArgs gpuArgments;
+  gpuArgments.addArg(gpuInput);
+  gpuArgments.addArg(gpuOutput);
+  Function<DEVICE_TYPE_GPU>(gpuArgments);
+}
+
+}  // namespace paddle
diff --git a/paddle/function/TensorShape.h b/paddle/function/TensorShape.h
new file mode 100644
index 0000000000000000000000000000000000000000..e491e3f1d6b26e14a5273b3b5a38aec941f5a9e5
--- /dev/null
+++ b/paddle/function/TensorShape.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <vector>
+
+namespace paddle {
+
+/**
+ * TensorShape used to represent shape of normal tensor.
+ */
+class TensorShape {
+public:
+  TensorShape() : ndims_(0), nelements_(0) { initDims(0); }
+
+  TensorShape(size_t ndims) : ndims_(ndims), nelements_(1) { initDims(ndims); };
+
+  TensorShape(std::initializer_list<size_t> dims) {
+    ndims_ = dims.size();
+    initDims(ndims_);
+    dims_.assign(dims);
+    numElements();
+  };
+
+  TensorShape(const TensorShape& t)
+      : ndims_(t.ndims_), nelements_(t.nelements_) {
+    initDims(ndims_);
+    dims_.assign(t.dims_.begin(), t.dims_.end());
+  };
+
+  // get the size of specified dimension
+  size_t operator[](size_t dim) const {
+    CHECK_GE(dim, (size_t)0);
+    CHECK_LT(dim, ndims_);
+    return dims_[dim];
+  }
+
+  // set the size of specified dimension
+  void setDim(size_t dim, size_t size) {
+    CHECK_GE(dim, (size_t)0);
+    CHECK_LT(dim, ndims_);
+    dims_[dim] = size;
+    numElements();
+  }
+
+  // number of dimensions of the tensor
+  size_t ndims() const { return ndims_; }
+
+  size_t getElements() const { return nelements_; }
+
+  bool operator==(const TensorShape& t) const {
+    if (ndims() != t.ndims()) return false;
+    for (size_t i = 0; i < ndims(); i++) {
+      if (dims_[i] != t.dims_[i]) return false;
+    }
+
+    return true;
+  }
+
+  bool operator!=(const TensorShape& t) const { return !(*this == t); }
+
+private:
+  // compute number of elements
+  void numElements() {
+    nelements_ = 1;
+    for (size_t n = 0; n < ndims_; n++) {
+      nelements_ *= dims_[n];
+    }
+  }
+
+  // init dims_
+  void initDims(size_t ndims) {
+    size_t count = ndims < 4 ? 4 : ndims;
+    dims_.assign(count, 1);
+  }
+
+  // number of dimensions
+  // ndims_ may not equal dims_.size()
+  size_t ndims_;
+  // number of elements
+  size_t nelements_;
+  std::vector<size_t> dims_;
+};
+
+}  // namespace paddle
diff --git a/paddle/function/TensorShapeTest.cpp b/paddle/function/TensorShapeTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..45a2e106e7fc3f0e9e57cf8c2bb549d747f4f49b
--- /dev/null
+++ b/paddle/function/TensorShapeTest.cpp
@@ -0,0 +1,53 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "TensorShape.h"
+#include <gtest/gtest.h>
+
+namespace paddle {
+
+TEST(TensorShape, Constructor) {
+  TensorShape t1;
+  EXPECT_EQ(t1.ndims(), 0);
+  EXPECT_EQ(t1.getElements(), 0);
+
+  TensorShape t2(3);
+  EXPECT_EQ(t2.ndims(), 3);
+  EXPECT_EQ(t2.getElements(), 1);
+
+  TensorShape t3({8, 10});
+  EXPECT_EQ(t3.ndims(), 2);
+  EXPECT_EQ(t3.getElements(), 80);
+
+  TensorShape t4(t3);
+  EXPECT_EQ(t4.ndims(), t3.ndims());
+  EXPECT_EQ(t4.getElements(), t3.getElements());
+
+  TensorShape t5({1, 2, 3, 4, 5});
+  EXPECT_EQ(t5.ndims(), 5);
+  EXPECT_EQ(t5.getElements(), 120);
+}
+
+TEST(TensorShape, GetAndSet) {
+  TensorShape t({1, 2, 3});
+  EXPECT_EQ(t.ndims(), 3);
+  EXPECT_EQ(t.getElements(), 6);
+
+  EXPECT_EQ(t[1], 2);
+  t.setDim(1, 100);
+  EXPECT_EQ(t.getElements(), 300);
+  EXPECT_EQ(t[1], 100);
+}
+
+}  // namespace paddle
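To recap the TensorShape semantics exercised by the tests above, a short sketch (variable names are illustrative):

TensorShape shape({2, 3, 4});  // ndims() == 3, getElements() == 2 * 3 * 4 == 24
size_t mid = shape[1];         // operator[] reads one dimension: 3
shape.setDim(1, 6);            // setDim() recomputes the element count,
                               // so getElements() now returns 48
TensorShape copy(shape);       // copy constructor duplicates dims_
CHECK(copy == shape);          // operator== compares rank, then each dim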
diff --git a/paddle/function/TensorType.h b/paddle/function/TensorType.h
new file mode 100644
index 0000000000000000000000000000000000000000..98942cff9e2ea44e78727d66a059ab8cf5f0ef7c
--- /dev/null
+++ b/paddle/function/TensorType.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/math/Matrix.h"
+
+namespace paddle {
+
+enum ValueType {
+  VALUE_TYPE_INT32 = 0,
+  VALUE_TYPE_FLOAT = 1,
+  VALUE_TYPE_DOUBLE = 2,
+  VALUE_TYPE_BYTE = 3
+};
+
+enum DeviceType {
+  DEVICE_TYPE_UNSPECIFIED = 0,
+  DEVICE_TYPE_CPU = 1,
+  DEVICE_TYPE_GPU = 2
+};
+
+inline int sizeOfValuType(ValueType valueType) {
+  if (valueType == VALUE_TYPE_INT32) {
+    return 4;
+  } else if (valueType == VALUE_TYPE_FLOAT) {
+    return 4;
+  } else if (valueType == VALUE_TYPE_DOUBLE) {
+    return 8;
+  } else {
+    LOG(FATAL) << "Unknown type: " << valueType;
+    return 0;
+  }
+}
+
+template <typename T>
+struct DataType;
+
+template <>
+struct DataType<float> {
+  static const ValueType value = VALUE_TYPE_FLOAT;
+};
+
+template <>
+struct DataType<double> {
+  static const ValueType value = VALUE_TYPE_DOUBLE;
+};
+
+template <>
+struct DataType<int> {
+  static const ValueType value = VALUE_TYPE_INT32;
+};
+
+namespace detail {
+
+template <typename VType, DeviceType Device>
+struct MatrixT;
+
+template <>
+struct MatrixT<real, DEVICE_TYPE_CPU> {
+  using type = CpuMatrix;
+};
+
+template <>
+struct MatrixT<real, DEVICE_TYPE_GPU> {
+  using type = GpuMatrix;
+};
+
+template <>
+struct MatrixT<int, DEVICE_TYPE_CPU> {
+  using type = void;  // Not implemented
+};
+
+template <>
+struct MatrixT<int, DEVICE_TYPE_GPU> {
+  using type = void;  // Not implemented
+};
+
+template <typename VType, DeviceType Device>
+struct VectorT;
+
+template <>
+struct VectorT<real, DEVICE_TYPE_CPU> {
+  using type = CpuVector;
+};
+
+template <>
+struct VectorT<real, DEVICE_TYPE_GPU> {
+  using type = GpuVector;
+};
+
+template <>
+struct VectorT<int, DEVICE_TYPE_CPU> {
+  using type = CpuIVector;
+};
+
+template <>
+struct VectorT<int, DEVICE_TYPE_GPU> {
+  using type = GpuIVector;
+};
+
+}  // namespace detail
+
+template <typename VType, DeviceType Device>
+struct Tensor {
+  typedef typename detail::MatrixT<VType, Device>::type Matrix;
+  typedef typename detail::VectorT<VType, Device>::type Vector;
+};
+
+}  // namespace paddle
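The Tensor traits above let device-generic code name the concrete matrix and vector types at compile time; a minimal sketch (the function itself is illustrative, not from the patch):

// Tensor<real, Device>::Matrix resolves to CpuMatrix or GpuMatrix;
// Tensor<int, Device>::Matrix would be void (not implemented).
template <DeviceType Device>
void logShape(const typename Tensor<real, Device>::Matrix& m) {
  LOG(INFO) << m.getHeight() << " x " << m.getWidth();
}
// logShape<DEVICE_TYPE_CPU>(cpuMatrix); picks CpuMatrix statically.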
diff --git a/paddle/function/TensorTypeTest.cpp b/paddle/function/TensorTypeTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e50e46f3e99111731d9587f3e4ddfd4b26ae27e9
--- /dev/null
+++ b/paddle/function/TensorTypeTest.cpp
@@ -0,0 +1,64 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "TensorType.h"
+#include <gtest/gtest.h>
+
+namespace paddle {
+
+TEST(TensorType, Matrix) {
+  Tensor<real, DEVICE_TYPE_CPU>::Matrix matrix(100, 200);
+  EXPECT_EQ(matrix.getHeight(), 100);
+  EXPECT_EQ(matrix.getWidth(), 200);
+  EXPECT_EQ(matrix.getElementCnt(), 100 * 200);
+  EXPECT_EQ(matrix.useGpu(), false);
+
+  Tensor<real, DEVICE_TYPE_GPU>::Matrix testGpu(100, 200);
+  EXPECT_EQ(testGpu.useGpu(), true);
+}
+
+TEST(TensorType, Vector) {
+  Tensor<real, DEVICE_TYPE_CPU>::Vector cpuVector(100);
+  Tensor<real, DEVICE_TYPE_GPU>::Vector gpuVector(100);
+  EXPECT_EQ(cpuVector.useGpu(), false);
+  EXPECT_EQ(gpuVector.useGpu(), true);
+  EXPECT_EQ(cpuVector.getSize(), 100);
+  EXPECT_EQ(gpuVector.getSize(), 100);
+
+  Tensor<int, DEVICE_TYPE_CPU>::Vector cpuIVector(100);
+  Tensor<int, DEVICE_TYPE_GPU>::Vector gpuIVector(100);
+  EXPECT_EQ(cpuIVector.useGpu(), false);
+  EXPECT_EQ(gpuIVector.useGpu(), true);
+  EXPECT_EQ(cpuIVector.getSize(), 100);
+  EXPECT_EQ(gpuIVector.getSize(), 100);
+}
+
+TEST(TensorType, EmptyMatrix) {
+  CpuMatrix empty(nullptr, 0, 0);
+  CpuMatrix nonEmpty(10, 10);
+  EXPECT_EQ(empty.isEmpty(), true);
+  EXPECT_EQ(nonEmpty.isEmpty(), false);
+  CHECK(nonEmpty);
+  auto function = [](const CpuMatrix& matrix) {
+    if (matrix) {
+      EXPECT_NE(matrix.getData(), nullptr);
+    } else {
+      EXPECT_EQ(matrix.getData(), nullptr);
+    }
+  };
+  function(empty);
+  function(nonEmpty);
+}
+
+}  // namespace paddle
diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h
index 5f031fc7c0761a8fe97eb16fe1dd8e0a1debfcdb..9a2ad7567f0dc93d0a8e396fd88b2488afe9d049 100644
--- a/paddle/gserver/dataproviders/DataProvider.h
+++ b/paddle/gserver/dataproviders/DataProvider.h
@@ -30,12 +30,12 @@ limitations under the License. */
 #include "paddle/math/Vector.h"
 #include "paddle/parameter/Argument.h"
 #include "paddle/utils/ClassRegistrar.h"
+#include "paddle/utils/Common.h"
 #include "paddle/utils/Locks.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Queue.h"
 #include "paddle/utils/ThreadLocal.h"
 #include "paddle/utils/Util.h"
-#include "paddle/utils/common.h"

 namespace paddle {
 /**
diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/gserver/dataproviders/PyDataProvider.cpp
index 5bdd55309c8bf8d5dcf84f5dcef2c5c85249a668..b53790e764b9f9ad668abd1f4125695e3533a027 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider.cpp
@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "PyDataProvider.h"
-#include <fenv.h>
-#include "paddle/utils/Excepts.h"
+#include "paddle/utils/Common.h"
 #include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/Util.h"
diff --git a/paddle/gserver/layers/ContextProjection.cpp b/paddle/gserver/layers/ContextProjection.cpp
index e947b2b9ecbebda11db5c049e1606a2d5926c28c..ebcc87cbf48a3c34a4e625e67f872fed69cdf44f 100644
--- a/paddle/gserver/layers/ContextProjection.cpp
+++ b/paddle/gserver/layers/ContextProjection.cpp
@@ -110,8 +110,8 @@ void ContextProjection::forward() {
   size_t input_dim = in_->value->getWidth();
   size_t dim = out_->value->getWidth();
   CHECK_EQ(dim, input_dim * config_.context_length());
-  size_t batch_size = in_->value->getHeight();
-  CHECK_EQ(forward_.size(), 1) << "Only one forward function here";
+  // size_t batch_size = in_->value->getHeight();
+  CHECK_EQ(forward_.size(), (size_t)1) << "Only one forward function here";

   REGISTER_TIMER_INFO("ContextProjectionForward", getName().c_str());
   bool is_padding = config_.trainable_padding();
@@ -119,14 +119,16 @@ void ContextProjection::forward() {
   auto w_ptr = state_ ?
state_.get() : is_padding ? weight_->getW().get() : nullptr; auto start_pos = in_->sequenceStartPositions; - forward_[0]->calc({Tensor(in_->value->getData(), Dims{batch_size, input_dim}), - Tensor(w_ptr ? w_ptr->getData() : nullptr, - Dims{w_ptr ? w_ptr->getHeight() : 0, input_dim}), - Tensor(reinterpret_cast( - const_cast(start_pos->getData(useGpu_))), - Dims{start_pos->getSize()})}, - {Tensor(out_->value->getData(), Dims{batch_size, dim})}, - {}); + + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(*in_->value); + inputs.addArg(CpuMatrix(w_ptr ? w_ptr->getData() : nullptr, + w_ptr ? w_ptr->getHeight() : 0, + input_dim)); + inputs.addArg(*in_->sequenceStartPositions->getVector(useGpu_)); + outputs.addArg(*out_->value, ADD_TO); + forward_[0]->calc(inputs, outputs); if (state_ && config_.context_start() < 0) { CHECK_EQ(1, in_->getNumSequences()); @@ -154,21 +156,24 @@ void ContextProjection::backward(const UpdateCallback& callback) { CHECK_EQ(dim, input_dim * config_.context_length()); size_t batch_size = in_->value->getHeight(); CHECK_EQ(batch_size, out_->value->getHeight()); - CHECK_EQ(backward_.size(), 1) << "Only one backward function here"; + CHECK_EQ(static_cast(backward_.size()), 1) + << "Only one backward function here"; REGISTER_TIMER_INFO("ContextProjectionBackward", getName().c_str()); bool is_padding = config_.trainable_padding(); auto start_pos = in_->sequenceStartPositions; auto w_ptr = is_padding ? weight_->getWGrad() : nullptr; - backward_[0]->calc({Tensor(in_->grad ? in_->grad->getData() : nullptr, - Dims{batch_size, input_dim}), - Tensor(w_ptr ? w_ptr->getData() : nullptr, - Dims{w_ptr ? w_ptr->getHeight() : 0, input_dim}), - Tensor(reinterpret_cast( - const_cast(start_pos->getData(useGpu_))), - Dims{start_pos->getSize()})}, - {Tensor(out_->grad->getData(), Dims{batch_size, dim})}, - {}); + + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(CpuMatrix( + in_->grad ? in_->grad->getData() : nullptr, batch_size, input_dim)); + inputs.addArg(CpuMatrix(w_ptr ? w_ptr->getData() : nullptr, + w_ptr ? w_ptr->getHeight() : 0, + input_dim)); + inputs.addArg(*in_->sequenceStartPositions->getVector(useGpu_)); + outputs.addArg(*out_->grad, ADD_TO); + backward_[0]->calc(inputs, outputs); if (config_.trainable_padding()) { weight_->getParameterPtr()->incUpdate(callback); diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp index e1c4b91ace21522a3bc640dfc4eaa1a42668ed02..0281170bc59855f6f4d2f4212523275a92d202d5 100644 --- a/paddle/gserver/layers/ConvProjection.cpp +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -130,7 +130,8 @@ void ConvProjection::reshapeTensorDesc(int batchSize) { void ConvProjection::reshape(int batchSize) { size_t width = calOutputSize(); CHECK_EQ(width, out_->value->getWidth()); - CHECK_EQ(channels_ * imageH_ * imageW_, in_->value->getWidth()) + CHECK_EQ(static_cast(channels_ * imageH_ * imageW_), + in_->value->getWidth()) << "Wrong input size for convolution" << " channels=" << channels_ << " imageH=" << imageH_ << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth(); diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/gserver/layers/GruCompute.h index a56af21317d1d43c836f7fe599a4dc614804bfec..3340e38e62cc396fd619cfa2a1fad57b0a8cf4c7 100644 --- a/paddle/gserver/layers/GruCompute.h +++ b/paddle/gserver/layers/GruCompute.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/gserver/layers/LstmCompute.h index 0d65b4158ebdc04f199048bbba98317c89fc8beb..2588fad2793961da2b2af889e8985f49540f1bda 100644 --- a/paddle/gserver/layers/LstmCompute.h +++ b/paddle/gserver/layers/LstmCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/gserver/layers/MultinomialSampler.h b/paddle/gserver/layers/MultinomialSampler.h index b48073c80b6f57cd86ceb80b9d749548c3acc1ac..546ef9c1f24d1bc8abe68ba8b2fe6ab55f4b03e5 100644 --- a/paddle/gserver/layers/MultinomialSampler.h +++ b/paddle/gserver/layers/MultinomialSampler.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp index 262d757c67e105a8d65619eed91de65d34cfe35e..4331009de7e98d2326049e563e46a55a20366507 100644 --- a/paddle/gserver/layers/NormProjectionLayer.cpp +++ b/paddle/gserver/layers/NormProjectionLayer.cpp @@ -59,7 +59,6 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap, void CMRProjectionNormLayer::forward(PassType passType) { Layer::forward(passType); - /* malloc memory for the output_ if necessary */ /* note: one sample correspond to one row */ MatrixPtr input = inputLayers_[0]->getOutputValue(); @@ -67,34 +66,36 @@ void CMRProjectionNormLayer::forward(PassType passType) { int size = getSize(); resetOutput(batchSize, size); - MatrixPtr outV = getOutputValue(); - Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_); - dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_}; - forward_[0]->calc( - {Tensor(input->getData(), dims_)}, - {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)}, - {}); + shape_ = TensorShape({batchSize, channels_, imgSizeH_, imgSizeW_}); + + // prepare forward arguments + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(*getInputValue(0), shape_); + outputs.addArg(*getOutputValue(), shape_, ASSIGN_TO); + outputs.addArg(*denoms_, shape_, ASSIGN_TO); + + forward_[0]->calc(inputs, outputs); } void CMRProjectionNormLayer::backward(const UpdateCallback& callback) { (void)callback; - if (NULL == inputLayers_[0]->getOutputGrad()) { + if (NULL == getInputGrad(0)) { return; } - /* Do derivation */ - MatrixPtr preOutGrad = inputLayers_[0]->getOutputGrad(); - MatrixPtr localGrad = getOutputGrad(); - MatrixPtr localOutV = getOutputValue(); - MatrixPtr preOutV = inputLayers_[0]->getOutputValue(); - - backward_[0]->calc({Tensor(preOutV->getData(), dims_), - Tensor(localOutV->getData(), dims_), - Tensor(localGrad->getData(), dims_), - Tensor(denoms_->getData(), dims_)}, - {Tensor(preOutGrad->getData(), dims_)}, - {}); + + // prepare backward arguments + BufferArgs inputs; + BufferArgs outputs; + inputs.addArg(*getInputValue(0), shape_); + inputs.addArg(*getOutputValue(), shape_); + inputs.addArg(*getOutputGrad(), shape_); + inputs.addArg(*denoms_, shape_); + outputs.addArg(*getInputGrad(0), shape_, ADD_TO); + + backward_[0]->calc(inputs, outputs); } } // namespace paddle diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h index 
6b2c5dde0d74db4b292d5006d19ce54d3194017e..2c0d8a3a718c484508b2bf6d4e7861d54a1682bb 100644 --- a/paddle/gserver/layers/NormProjectionLayer.h +++ b/paddle/gserver/layers/NormProjectionLayer.h @@ -41,6 +41,6 @@ public: void backward(const UpdateCallback& callback = nullptr); protected: - Dims dims_; + TensorShape shape_; }; } // namespace paddle diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index c26a2a7f06bc16c113f1812868b5d2b8a5060635..0caa5e1e11e6d42fadfa87149814c4b77b3b6271 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -2,8 +2,7 @@ ################### test_ProtoDataProvider ############ add_unittest_without_exec(test_ProtoDataProvider - test_ProtoDataProvider.cpp - TestUtil.cpp) + test_ProtoDataProvider.cpp) # test_ProtoDataProvider will mkdir as same name, # so if WORKING_DIRECTORY is default directory, then @@ -15,53 +14,46 @@ add_test(NAME test_ProtoDataProvider ################# test_LayerGrad ####################### add_unittest_without_exec(test_LayerGrad test_LayerGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_LayerGrad COMMAND test_LayerGrad) add_unittest_without_exec(test_ActivationGrad test_ActivationGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ActivationGrad COMMAND test_ActivationGrad) ################# test_ConvTrans ####################### add_unittest_without_exec(test_ConvTrans test_ConvTrans.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ConvTrans COMMAND test_ConvTrans) ################# test_PriorBox ####################### add_unittest_without_exec(test_PriorBox test_PriorBox.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_PriorBox COMMAND test_PriorBox) ################# test_ConvUnify ####################### add_unittest_without_exec(test_ConvUnify test_ConvUnify.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ConvUnify COMMAND test_ConvUnify) ################# test_BatchNorm ####################### add_unittest_without_exec(test_BatchNorm test_BatchNorm.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_BatchNorm COMMAND test_BatchNorm) ################## test_Evaluator ####################### add_unittest(test_Evaluator - test_Evaluator.cpp - TestUtil.cpp) + test_Evaluator.cpp) ################ test_LinearChainCRF #################### add_simple_unittest(test_LinearChainCRF) @@ -72,8 +64,7 @@ add_simple_unittest(test_MultinomialSampler) ############## test_PyDataProvider ######################## if(WITH_PYTHON) add_unittest_without_exec(test_PyDataProvider - test_PyDataProvider.cpp - TestUtil.cpp) + test_PyDataProvider.cpp) add_test(NAME test_PyDataProvider COMMAND .set_python_path.sh -d ./gserver/tests:${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider @@ -81,18 +72,15 @@ if(WITH_PYTHON) endif() ############### test_RecurrentLayer ####################### -add_unittest(test_RecurrentLayer - test_RecurrentLayer.cpp - TestUtil.cpp) +add_simple_unittest(test_RecurrentLayer) ############### test_WarpCTCLayer ####################### if(NOT WITH_DOUBLE) add_unittest_without_exec(test_WarpCTCLayer - test_WarpCTCLayer.cpp - TestUtil.cpp) + test_WarpCTCLayer.cpp) add_test(NAME test_WarpCTCLayer - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer 
--warpctc_dir=${WARPCTC_LIB_DIR} WORKING_DIRECTORY ${PROJ_ROOT}/paddle) endif() @@ -108,8 +96,7 @@ add_test(NAME test_RecurrentGradientMachine WORKING_DIRECTORY ${PROJ_ROOT}/paddle) add_unittest_without_exec(test_NetworkCompare - test_NetworkCompare.cpp - TestUtil.cpp) + test_NetworkCompare.cpp) if(WITH_GPU) add_test(NAME test_NetworkCompare COMMAND .set_python_path.sh -d ${PROJ_ROOT}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index 57c176810fddf96828c210807673b7d1a3c739c0..ae016e74eaa84f7c43a30c09c8c4577e25360c4e 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -310,7 +310,7 @@ void initDataLayer(TestConfig testConf, testConf.inputDefs[i].labelSeqStartPositions; if (labelSeqStartPositions.size() != 0) { CHECK(!sequenceStartPositions); - CHECK_GE(labelSeqStartPositions.size(), 2); + CHECK_GE(static_cast(labelSeqStartPositions.size()), 2); sequenceStartPositions = ICpuGpuVector::create(labelSeqStartPositions.size(), useGpu); diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 4e88ac0e81ef2596f14995be53f7c5c20ddba2d7..9f68eb64d0b4ad27306d3b20387d74a7e438d910 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/gserver/layers/DataLayer.h" #include "paddle/trainer/Trainer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT namespace paddle { diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index 7d7e68da5c5a9dbcba024002a988f26f7613b724..b201ba8a5a4146ab28cd96454f434f889d72a968 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 7f5fcb670b70aed9f0a04180d344556a0390122f..d07299bfe3c4147742384a45dc6f1698d9c382f4 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -22,7 +22,7 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -114,8 +114,8 @@ TEST(Layer, batchNorm) { bnLayer->forward(PASS_GC); convLayer->forward(PASS_GC); - CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100); - CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576); + CHECK_EQ(static_cast(convLayer->getOutputValue()->getHeight()), 100); + CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } int main(int argc, char** argv) { diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index dd3378304b433c135881310eb89273b6bf492af2..40bb1e2d73c81280a9b12114c13de851285c276b 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -23,7 +23,7 @@ limitations under the License. 
*/ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index ad99b50245cf56eb7db227fa582f6e3f41b47a7a..207fc0566fcf4a0d2e971f3c169a14a64146155b 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -23,7 +23,7 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index e07066dad84aa6326c2447fc5ee80fa496735fbf..8165eb8269336193858962edac4f9637c2fc1c2f 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "ModelConfig.pb.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" #include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 2cc25f6b211e367fc82c07c30082c3e12c04e53d..66a70ecd41091b9590038dab3194dd2a0c59dd03 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -21,7 +21,7 @@ limitations under the License. */ #include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index 0d261059555c971cd509e64802d6c70abc9d2fef..4db30f37a5bc92d4348caed0aebdd8a589b55712 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/Stat.h" diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp index a6d6a242696633e66a05bf9fc9eee81a468ed056..ae0e3bc3d24c54eb84c7b5f5053e629607ef4310 100644 --- a/paddle/gserver/tests/test_PriorBox.cpp +++ b/paddle/gserver/tests/test_PriorBox.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index 8fc0aaab69548ae60100696db04d5611570df110..e11bf402c27898b8fdbd3fceeb8aeff8906352db 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/gserver/dataproviders/ProtoDataProvider.h" #include "paddle/utils/Util.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp index 0f264ecf91837f6681f0577b93be7e35be268c04..db883543c306c1938eb9da188ce20ed768018efb 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/gserver/tests/test_PyDataProvider.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/gserver/dataproviders/PyDataProvider.h" #include "paddle/utils/Util.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT using namespace paddle; // NOLINT diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index 5f8bc5ecd0f77efc6dcda0330f124ca6cab7f277..7e193eb31a03e6a6b8b0b02e89608a0e02b9e248 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -293,7 +293,7 @@ TEST(PyDataProvider2, can_over_batch_size) { while (true) { int64_t realBatchSize = provider->getNextBatchInternal(batchSize, &batch); if (realBatchSize) { - CHECK_LE(realBatchSize, batchSize); + CHECK_LE(static_cast(realBatchSize), batchSize); } else { break; } diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index f91c788863b6963df92b735dbfef2bacee1fff45..16ab0e6aecb6a895b20389992a44dc542eb3b00a 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/Layer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index dab6366588b7894a6700c00a5331d436ca2a410c..23ae95852e84216c9065f1b123d35ce868fbb90f 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/WarpCTCLayer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h index 8f9bc9e823eb8062535920361899ce3cc06ec3a7..8691c87ac3b88499a9676d59af533e0f4713dfc3 100644 --- a/paddle/math/BaseMatrix.h +++ b/paddle/math/BaseMatrix.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "TensorExpression.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 4865a081a5aaa010d5b3ce0127ffc6f8330d4a68..dd24f8821d49768354840e0381742218ab9a0204 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -26,8 +26,8 @@ limitations under the License. 
*/ #include "BaseMatrix.h" #include "MemoryHandle.h" #include "Vector.h" +#include "paddle/utils/Common.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/common.h" namespace paddle { @@ -1091,6 +1091,10 @@ public: TensorCpuApply(*this, expr); } } + + bool isEmpty() const { return data_ == nullptr; } + + explicit operator bool() const { return !isEmpty(); } }; inline std::ostream& operator<<(std::ostream& os, const Matrix& mat) { diff --git a/paddle/math/TensorExpression.h b/paddle/math/TensorExpression.h index f3d60e400380f7d7d645559318837b0d7706661d..6fd60e7f3c65ea8e31fd1aaaa61b6ad8956ff1cd 100644 --- a/paddle/math/TensorExpression.h +++ b/paddle/math/TensorExpression.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "hl_tensor_ops.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Logging.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/Vector.h b/paddle/math/Vector.h index b4347a70f874a2a1bf933bbea4d1b15385f36090..9af6e30c9e13895ad95653a787ec1c1ad77a248f 100644 --- a/paddle/math/Vector.h +++ b/paddle/math/Vector.h @@ -21,8 +21,8 @@ limitations under the License. */ #include "BaseMatrix.h" #include "MemoryHandle.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Thread.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index a3ea078509704f305672d0b02d272de0f6c97f51..06fc10bae7232fb1278e89e8d9cbdf477fc27b60 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -7,8 +7,7 @@ add_simple_unittest(test_SparseMatrix) # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference. add_unittest(test_matrixCompare - test_matrixCompare.cpp - ../../gserver/tests/TestUtil.cpp) + test_matrixCompare.cpp) add_simple_unittest(test_sparseMatrixCompare) add_simple_unittest(test_perturbation) diff --git a/paddle/math/tests/test_FPException.cpp b/paddle/math/tests/test_FPException.cpp index 6aa5891bce922c00cbb4f69a511fb3c42d53f319..3836f7fc0fe577c463c9a476d49b21f2967043e5 100644 --- a/paddle/math/tests/test_FPException.cpp +++ b/paddle/math/tests/test_FPException.cpp @@ -28,10 +28,10 @@ limitations under the License. */ * so we can add some tricks to prevent exp calculate an excessive value. * */ -#include + #include #include "paddle/math/Matrix.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index d490078d909e7940e83a6f461f9386eeda02f53c..e6b5dba446b5a0022ade76b188895c4e0e2a22b4 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -15,9 +15,9 @@ limitations under the License. */ #ifndef PADDLE_ONLY_CPU #include -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/testing/TestUtil.h" #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 98d63438a57b48340bc3b05ac7ac3d6c5cd90fb0..3a780d26c050ac5870824f2ef35c87edc61900a2 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -18,9 +18,9 @@ limitations under the License. 
*/ #include #include "TensorCheck.h" -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/testing/TestUtil.h" #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" diff --git a/paddle/parameter/ParallelParameter.h b/paddle/parameter/ParallelParameter.h index 1ee220d2dc1a26b3f394ca673975cc827f450206..2e7c18b8084dc25b9f2f7630390bb4553ac703c9 100644 --- a/paddle/parameter/ParallelParameter.h +++ b/paddle/parameter/ParallelParameter.h @@ -26,9 +26,9 @@ limitations under the License. */ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdateFunctions.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" -#include "paddle/utils/common.h" #include "ParameterConfig.pb.h" diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h index e05137b315f254795de26a5ff0ac977e7968f4d8..72c8336799133ad3f5855b0c1aa06639179ff70a 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -26,11 +26,11 @@ limitations under the License. */ #include "ParameterUpdaterHook.h" #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Locks.h" #include "paddle/utils/ThreadLocal.h" #include "paddle/utils/Util.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/parameter/ParameterUpdateFunctions.h index 2cb379871716ffd9e75eede607276b6b3f200e6b..0fca280149c30f0241ec988dfd6719a5519808f4 100644 --- a/paddle/parameter/ParameterUpdateFunctions.h +++ b/paddle/parameter/ParameterUpdateFunctions.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/math/Vector.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h index ccf05ae1ca3ab76fbe9d36237969207768de4dd2..11d7a147bf749ba2de0772b5efd5f73ab0ccdb1a 100644 --- a/paddle/pserver/BaseClient.h +++ b/paddle/pserver/BaseClient.h @@ -17,8 +17,8 @@ limitations under the License. */ #include "ParameterService.pb.h" #include "paddle/math/Matrix.h" #include "paddle/pserver/ProtoServer.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Queue.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h index 70cfc6d70072f399ef97eef1a0e6111a127cbd9f..89b3ddd502151e537b81bdbb09f171dd6e13ba26 100644 --- a/paddle/pserver/ParameterClient2.h +++ b/paddle/pserver/ParameterClient2.h @@ -23,11 +23,11 @@ limitations under the License. */ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/pserver/BaseClient.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Queue.h" #include "paddle/utils/Util.h" -#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h index 79d1eb97ff149f4f5ca9a924c1b0b7ba629f1e33..0f5a5895907b20a0cf882b6fa6fb74bd52dce058 100644 --- a/paddle/pserver/ParameterServer2.h +++ b/paddle/pserver/ParameterServer2.h @@ -29,10 +29,10 @@ limitations under the License. 
*/ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterOptimizer.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Stat.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/scripts/docker/Dockerfile b/paddle/scripts/docker/Dockerfile index b01de499bd1fbcfff1f655535f574ae2caa17707..1522be023f6de32f86fc8a367867bbe2f1c9aeb6 100644 --- a/paddle/scripts/docker/Dockerfile +++ b/paddle/scripts/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt-get update \ && apt-get clean -y RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib RUN pip install -U BeautifulSoup docopt PyYAML pillow \ - sphinx sphinx_rtd_theme recommonmark + sphinx sphinx_rtd_theme recommonmark jupyter ARG WITH_AVX ARG WITH_DOC @@ -43,4 +43,13 @@ RUN echo 'root:root' | chpasswd RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] + +# Jupyter Notebook directory. +RUN mkdir /notes/ +WORKDIR "/notes" +EXPOSE 8888 + +RUN mkdir -p /opt/bin +COPY ./paddle/scripts/docker/entrypoint /opt/bin/ + +CMD ["/opt/bin/entrypoint"] diff --git a/paddle/scripts/docker/Dockerfile.gpu b/paddle/scripts/docker/Dockerfile.gpu index a68cc79b84271c63d41a89494150381d96748b67..09f07043e2172319de257cc952fb81ba53ce89a5 100644 --- a/paddle/scripts/docker/Dockerfile.gpu +++ b/paddle/scripts/docker/Dockerfile.gpu @@ -15,7 +15,7 @@ RUN apt-get update \ && apt-get clean -y RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib RUN pip install -U BeautifulSoup docopt PyYAML pillow \ - sphinx sphinx_rtd_theme recommonmark + sphinx sphinx_rtd_theme recommonmark jupyter ARG WITH_AVX ARG WITH_DOC @@ -43,4 +43,13 @@ RUN echo 'root:root' | chpasswd RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] + +# Jupyter Notebook directory. 
+RUN mkdir /notes/ +WORKDIR "/notes" +EXPOSE 8888 + +RUN mkdir -p /opt/bin +COPY ./paddle/scripts/docker/entrypoint /opt/bin/ + +CMD ["/opt/bin/entrypoint"] diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ca3f1c3f1896feaae657f47c121ce6cd858dc2c9..7edba3dd09cdc594383597ac7cf7913d50e9f6e1 100755 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -43,5 +43,7 @@ cp -rv /woboq/data $WOBOQ_OUT/../data -o $WOBOQ_OUT \ -p paddle:/paddle /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT - +cd /woboq +make clean +rm -rf /paddle/build trap : 0 diff --git a/paddle/scripts/docker/entrypoint b/paddle/scripts/docker/entrypoint new file mode 100755 index 0000000000000000000000000000000000000000..87083467f50acd689ce57b86951f5f7a03c6a58b --- /dev/null +++ b/paddle/scripts/docker/entrypoint @@ -0,0 +1,8 @@ +#!/bin/bash +LOG=/var/log/all + +touch $LOG + +/usr/sbin/sshd -D >> $LOG & +jupyter notebook --ip=0.0.0.0 /notes/ >> $LOG & +tail -f $LOG diff --git a/paddle/scripts/travis/before_install.linux.sh b/paddle/scripts/travis/before_install.linux.sh deleted file mode 100755 index 9620bff6bcf77c6e87f149e8e33408170dd8e507..0000000000000000000000000000000000000000 --- a/paddle/scripts/travis/before_install.linux.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e -pip install protobuf -cd /tmp -wget https://github.com/google/protobuf/archive/v3.0.2.tar.gz -O protobuf.tar.gz -tar xf protobuf.tar.gz -cd protobuf* -./autogen.sh -./configure --prefix=/usr/ -make -j 2 install -cd .. -rm -rf protobuf* - -pushd /usr/src/gtest -cmake . -make -sudo cp *.a /usr/lib -popd diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh index bd88ed39132f19ca7cfc4f0dd6acdbc6b83e94ab..7036f971fdd7bac68b67c7b5a92e50c352e214c1 100755 --- a/paddle/scripts/travis/before_install.osx.sh +++ b/paddle/scripts/travis/before_install.osx.sh @@ -3,10 +3,4 @@ brew update brew tap homebrew/science brew install python sudo pip install --upgrade protobuf -brew install cmake python glog gflags openblas wget md5sha1sum protobuf - -wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz -O gtest.tar.gz -tar xf gtest.tar.gz -cd googletest-release-1.8.0/ -cmake . -make install +brew install swig openblas md5sha1sum protobuf diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index 9caeb21beb15ee5281f9a6aefcfd59b94b91e48a..fd3aeb02b21d659f783702905117fc838b93eafd 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -1,27 +1,19 @@ #!/bin/bash -./build_submodules.sh source ./common.sh -CMAKE_EXTRA="" -if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then - CMAKE_EXTRA="-DPYTHON_LIBRARY=/usr/local/Cellar/python/2.7.12_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config/libpython2.7.dylib" -else - CMAKE_EXTRA="-DWITH_SWIG_PY=ON" -fi - - -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -DON_COVERALLS=ON ${CMAKE_EXTRA} NPROC=1 if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then + export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages + export PYTHONHOME=/opt/python/2.7.12 + export PATH=/opt/python/2.7.12/bin:${PATH} + cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS} NRPOC=`nproc` make -j $NPROC make coveralls + sudo make install elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + export PYTHONPATH=/usr/local/lib/python2.7/site-packages + cmake .. 
-DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS} NPROC=`sysctl -n hw.ncpu` make -j $NPROC - env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" fi - - -sudo make install -sudo paddle version diff --git a/paddle/scripts/travis/build_submodules.sh b/paddle/scripts/travis/build_submodules.sh deleted file mode 100755 index d458bf92bf455609de601c60402101d09765dfe4..0000000000000000000000000000000000000000 --- a/paddle/scripts/travis/build_submodules.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e -WORK_DIR=$PWD -PROJ_ROOT=$(git rev-parse --show-cdup) -SUBMODULES=$(grep path ${PROJ_ROOT}.gitmodules | sed 's/^.*path = //') - -for module in $SUBMODULES -do - case $module in - "warp-ctc") - if [ -d ${PROJ_ROOT}warp-ctc/build ]; then - rm -rf ${PROJ_ROOT}warp-ctc/build - fi - mkdir ${PROJ_ROOT}warp-ctc/build - cd ${PROJ_ROOT}warp-ctc/build - cmake ..; make - ;; - esac -done -cd $WORK_DIR diff --git a/paddle/scripts/travis/common.sh b/paddle/scripts/travis/common.sh index 9b6e420ca7931f0d17da461c7579bf4dc69e18e0..f05c7530a3b0632948e4b18c477d6dc6aad04c03 100755 --- a/paddle/scripts/travis/common.sh +++ b/paddle/scripts/travis/common.sh @@ -2,3 +2,5 @@ set -e mkdir -p ../../../build cd ../../../build +mkdir -p $HOME/third_party +EXTRA_CMAKE_OPTS="-DTHIRD_PARTY_PATH=${HOME}/third_party" diff --git a/paddle/scripts/travis/docs.sh b/paddle/scripts/travis/docs.sh index 8690fe1d40c935e119fefbc02f3a228d76d8c0f9..bdafb145bcd4e5990f382bb890f804687c474f7c 100755 --- a/paddle/scripts/travis/docs.sh +++ b/paddle/scripts/travis/docs.sh @@ -4,7 +4,7 @@ source ./common.sh # Compile Documentation only. -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=ON +cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=ON ${EXTRA_CMAKE_OPTS} make paddle_docs paddle_docs_cn # check websites for broken links diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 464ad632868bd1fd4d88547212421302ca0b2116..e3650bf1c0c4692a50e9731fcd8b832865eaac62 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -14,7 +14,9 @@ # This file is used to build paddle python binding package. # It will be invoked by Makefile that generated by COMAKE + from setuptools import setup, Extension + import numpy as np import api.paddle_ld_flags import platform diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 584498c8602ee5faad3e21a8588af7bb802d7377..c47add04b081cbdf78b5a5d3bca3a71025b3d9ac 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -3,4 +3,6 @@ if(WITH_TESTING) add_library(paddle_test_main STATIC TestMain.cpp) add_dependencies(paddle_test_main gen_proto_cpp) + add_library(paddle_test_util STATIC TestUtil.cpp) + add_dependencies(paddle_test_util gen_proto_cpp) endif() diff --git a/paddle/gserver/tests/TestUtil.cpp b/paddle/testing/TestUtil.cpp similarity index 100% rename from paddle/gserver/tests/TestUtil.cpp rename to paddle/testing/TestUtil.cpp diff --git a/paddle/gserver/tests/TestUtil.h b/paddle/testing/TestUtil.h similarity index 100% rename from paddle/gserver/tests/TestUtil.h rename to paddle/testing/TestUtil.h diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index 09e0a213ab2d71890cfafb905b5969383acfe95a..8465addaf9e03831e914be2c73901c3b1a9d537f 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -14,7 +14,6 @@ limitations under the License. */ #include "Trainer.h" -#include #include #include @@ -24,7 +23,7 @@ limitations under the License. 
*/ #include -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Stat.h" diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/trainer/TrainerMain.cpp index 947f9cadcc983d58ce31ef462e51dc42e41eaf1b..e2fbd21e14afa7c89b82999b08bf91c1de182906 100644 --- a/paddle/trainer/TrainerMain.cpp +++ b/paddle/trainer/TrainerMain.cpp @@ -12,9 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "paddle/pserver/ParameterServer2.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/StringUtil.h" diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/trainer/tests/CMakeLists.txt index 28c3d6f2631f9e28e3f1ff086b1e8edf994e73a4..22e07bd0e98a4cd36e6ed5860bcff0d4ae7cb1d2 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/trainer/tests/CMakeLists.txt @@ -17,9 +17,10 @@ add_test(NAME test_Compare ################# test_Trainer ########################### add_unittest_without_exec(test_Trainer test_Trainer.cpp) -set(diy_dll_dir ${CMAKE_CURRENT_BINARY_DIR}/../../gserver/tests) add_test(NAME test_Trainer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/paddle/trainer/tests/gen_proto_data.py && + ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_Trainer WORKING_DIRECTORY ${PROJ_ROOT}/paddle/) @@ -82,5 +83,5 @@ add_test(NAME test_PyDataProviderWrapper #################### test_config_parser ######################### add_test(NAME test_config_parser COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - python ${PROJ_ROOT}/paddle/trainer/tests/config_parser_test.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/paddle/trainer/tests/config_parser_test.py WORKING_DIRECTORY ${PROJ_ROOT}/paddle/) diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp index 371282dd6bb9a995bc6ae8b2a5bd708f831d7e33..264bc46ebcd0aa17fd605e537fcb2c316ef31162 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/trainer/tests/test_Trainer.cpp @@ -96,11 +96,6 @@ TEST(checkGradient, multi) { TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); } TEST(checkGradient, chunk) { -#if defined(__APPLE__) || defined(__OSX__) - EXPECT_EQ(0, system("python trainer/tests/gen_proto_data.py")); -#else - EXPECT_EQ(0, system("python2 trainer/tests/gen_proto_data.py")); -#endif checkGradientTest(configFile3, false, false); #ifndef PADDLE_ONLY_CPU checkGradientTest(configFile3, true, true); diff --git a/paddle/utils/.gitignore b/paddle/utils/.gitignore index f2cfd7409412de68f4183daebcb48e7a3ae37672..956b606a18cae1bb11322accfa174ae5ce1580de 100644 --- a/paddle/utils/.gitignore +++ b/paddle/utils/.gitignore @@ -1 +1,2 @@ enable_virtualenv.c +PythonUtil.cpp diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt index 45240b5002aa18be4a9b7e3ec3b754eb83ca0e09..10d906ee16656a808122b81d8b2fef55b8e7b7e9 100644 --- a/paddle/utils/CMakeLists.txt +++ b/paddle/utils/CMakeLists.txt @@ -1,5 +1,7 @@ # The utilities for paddle +configure_file(PythonUtil.cpp.in ${PROJ_ROOT}/paddle/utils/PythonUtil.cpp) + file(GLOB UTIL_HEADERS . *.h) file(GLOB UTIL_SOURCES . 
diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt
index 45240b5002aa18be4a9b7e3ec3b754eb83ca0e09..10d906ee16656a808122b81d8b2fef55b8e7b7e9 100644
--- a/paddle/utils/CMakeLists.txt
+++ b/paddle/utils/CMakeLists.txt
@@ -1,5 +1,7 @@
 # The utilities for paddle
 
+configure_file(PythonUtil.cpp.in ${PROJ_ROOT}/paddle/utils/PythonUtil.cpp)
+
 file(GLOB UTIL_HEADERS . *.h)
 file(GLOB UTIL_SOURCES . *.cpp)
 create_resources(enable_virtualenv.py enable_virtualenv.c)
diff --git a/paddle/utils/common.h b/paddle/utils/Common.h
similarity index 97%
rename from paddle/utils/common.h
rename to paddle/utils/Common.h
index 202a9d980d8350c230daaf473dd34d4069479e5f..1f1d0255a5eaef824171ddeaf9480167f232007e 100644
--- a/paddle/utils/common.h
+++ b/paddle/utils/Common.h
@@ -14,6 +14,8 @@ limitations under the License. */
 
 #pragma once
 
+#include "Excepts.h"
+
 /**
  * Disable copy macro.
  */
diff --git a/paddle/utils/CpuId.h b/paddle/utils/CpuId.h
index 1218e8194c4e837ca880744f92e769a68ba474de..0f3985cc7b2c018ede9bba9644d2d096561dccee 100644
--- a/paddle/utils/CpuId.h
+++ b/paddle/utils/CpuId.h
@@ -11,7 +11,7 @@ limitations under the License. */
 
 #pragma once
 
-#include "common.h"
+#include "Common.h"
 
 namespace paddle {
diff --git a/paddle/utils/Excepts.h b/paddle/utils/Excepts.h
index dc3369b7e8c27cf53a03ce56b18a123f291d2d6d..5c2c504f53a586f2991ccfae891991465fdb39b6 100644
--- a/paddle/utils/Excepts.h
+++ b/paddle/utils/Excepts.h
@@ -15,6 +15,8 @@ limitations under the License. */
 #ifndef EXCEPTS_H_
 #define EXCEPTS_H_
 
+#include <fenv.h>
+
 #if defined(__APPLE__) || defined(__OSX__)
 
 int fegetexcept(void);
diff --git a/paddle/utils/Locks.h b/paddle/utils/Locks.h
index a21872e89ebc172b87c8b5c3731a89302f34f521..e87abb9139f1c3f250f8b8fe1afdd8883f682647 100644
--- a/paddle/utils/Locks.h
+++ b/paddle/utils/Locks.h
@@ -19,7 +19,7 @@ limitations under the License. */
 #include <pthread.h>
 #include <semaphore.h>
 
-#include "common.h"
+#include "Common.h"
 
 namespace paddle {
diff --git a/paddle/utils/PythonUtil.cpp b/paddle/utils/PythonUtil.cpp.in
similarity index 98%
rename from paddle/utils/PythonUtil.cpp
rename to paddle/utils/PythonUtil.cpp.in
index 7faeff55c28b9065179ad27b3b604a9f411249e5..66b5795e29fb9fa751ed802e87ced0a71aea4c51 100644
--- a/paddle/utils/PythonUtil.cpp
+++ b/paddle/utils/PythonUtil.cpp.in
@@ -195,6 +195,10 @@ extern const char enable_virtualenv_py[];
 }
 void initPython(int argc, char** argv) {
 #ifndef PADDLE_NO_PYTHON
+  char pyHome[] = "@PYTHON_INSTALL_DIR@";  // NOLINT
+  if (strlen(pyHome)) {
+    Py_SetPythonHome(pyHome);
+  }
   Py_SetProgramName(argv[0]);
   Py_Initialize();
   PySys_SetArgv(argc, argv);
diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h
index dc15ada5862d648af27aa1b0e8c8a5cce012ded8..613844669d2495ada7b8f7a841f47b821b7fdeba 100644
--- a/paddle/utils/Util.h
+++ b/paddle/utils/Util.h
@@ -26,9 +26,9 @@ limitations under the License. */
 #include <string>
 #include <vector>
 
+#include "Common.h"
 #include "Logging.h"
 #include "TrainerConfig.pb.h"
-#include "common.h"
 
 #include "Flags.h"
 #include "hl_gpu.h"
diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h
index aa5df3243893145dbcc7e7ef2592555fc1c88fc9..f53d6420bbbdf66f8f355af95c6b11c30a3bfab9 100644
--- a/paddle/utils/Version.h
+++ b/paddle/utils/Version.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include <stddef.h>
 #include <iostream>
-#include "common.h"
+#include "Common.h"
 
 namespace paddle {
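The PythonUtil change works by templating: configure_file() expands @PYTHON_INSTALL_DIR@ into the generated PythonUtil.cpp (which is why that file is now git-ignored), and initPython() calls Py_SetPythonHome() only when the configured string is non-empty, pinning the embedded interpreter to the build's own Python. One way to watch the substitution happen, assuming PYTHON_INSTALL_DIR is normally set by the third_party Python build and is passed by hand here purely for illustration:

    # Sketch: inspect the configure_file substitution (flag name assumed).
    cd build
    cmake .. -DPYTHON_INSTALL_DIR="$HOME/third_party/python"
    grep -n "pyHome" ../paddle/utils/PythonUtil.cpp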
*/ -#include "Excepts.h" +#include "paddle/utils/Excepts.h" #if defined(__APPLE__) || defined(__OSX__) -#include - int fegetexcept(void) { static fenv_t fenv; return fegetenv(&fenv) ? -1 : (fenv.__control & FE_ALL_EXCEPT); diff --git a/proto/CMakeLists.txt b/proto/CMakeLists.txt index 2c40070eca44d8656d7ce82157a1b840092b9965..e854b2b427e550ec491dacf931cc2d2cce7ba6c2 100644 --- a/proto/CMakeLists.txt +++ b/proto/CMakeLists.txt @@ -18,10 +18,10 @@ foreach(filename ${proto_filenames}) ${PROTO_GEN} ${CUR_PROTO_GEN}) add_custom_command(OUTPUT ${CUR_PROTO_GEN} - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + COMMAND env ${py_env} ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${CMAKE_CURRENT_BINARY_DIR} - --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} - DEPENDS ${filename}) + --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} + DEPENDS ${filename} ${external_project_dependencies}) set(CUR_PROTO_GEN_PY ${PROJ_ROOT}/paddle/python/paddle/proto/${base_filename}_pb2.py) @@ -29,9 +29,9 @@ foreach(filename ${proto_filenames}) ${CUR_PROTO_GEN_PY} ${PROTO_GEN_PY}) add_custom_command(OUTPUT ${CUR_PROTO_GEN_PY} - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto - --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} - DEPENDS ${filename}) + COMMAND env ${py_env} ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto + --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} + DEPENDS ${filename} ${external_project_dependencies}) endforeach() include_directories(${CMAKE_CURRENT_BINARY_DIR}/proto) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index dce0b909524369926eda54763e571706b79daeaf..1cda4762eb2a55175d6c9faee98aaeaa1f763890 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -10,26 +10,17 @@ set(PY_FILES paddle/__init__.py ${HELPERS_PY_FILES} ${UTILS_PY_FILES}) -set(PADDLE_INTERNAL_PACKAGE "") -if (PADDLE_WITH_INTERNAL) - set(PADDLE_INTERNAL_PACKAGE "paddle.internals") -endif() - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp - COMMAND ${PYTHON_EXECUTABLE} setup.py bdist_wheel + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT_DIR}/.timestamp - DEPENDS gen_proto_py ${PY_FILES}) + DEPENDS gen_proto_py ${PY_FILES} ${external_project_dependencies}) add_custom_target(paddle_python ALL DEPENDS ${OUTPUT_DIR}/.timestamp) -find_python_module(pip REQUIRED) -find_python_module(wheel REQUIRED) -find_python_module(google.protobuf REQUIRED) - add_subdirectory(paddle/trainer_config_helpers/tests) install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dist/ diff --git a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt index d1a9843d326669711bf3d0769df1b804cfcfa673..403aafabe9143472dd2f0857ecd25f7acf515b6c 100644 --- a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt +++ b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt @@ -1,12 +1,12 @@ #################### test_config_parser ######################### add_test(NAME layers_test COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - python ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/layers_test.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/layers_test.py WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) add_test(NAME test_reset_hook COMMAND 
          ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
-    python ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
+    ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
   WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
 
 if (PROTOBUF_3)
@@ -14,12 +14,12 @@ if (PROTOBUF_3)
         ProtobufEqualMain.cpp)
     add_test(NAME test_layerHelpers
       COMMAND
-        ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh
+        ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE}
         ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal
     )
 else()
     add_test(NAME test_layerHelpers
       COMMAND
-        ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh
+        ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE}
     )
 endif()
diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
index a54af94ce3db4ed300dee697b30516c3b6448d7c..ee5961af75ebb33af52f9add645f793015288f4e 100755
--- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
+++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
@@ -10,13 +10,13 @@ protostr=$PWD/protostr
 for conf in ${configs[*]}
 do
     echo "Generating " $conf
-    python -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest
-    cat ${conf}.py |python test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest
+    $1 -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest
+    cat ${conf}.py |$1 test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest
 done
 
 for conf in ${whole_configs[*]}
 do
     echo "Generating " $conf
-    python -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest
-    cat ${conf}.py |python test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest
+    $1 -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest
+    cat ${conf}.py |$1 test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest
 done
diff --git a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh
index e984ee70625456241b3cfe6202fdadaa3807d33c..a37eb6439e6d2803a417883f0aed2a5d56d059b9 100755
--- a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh
+++ b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh
@@ -7,7 +7,7 @@ protostr=`dirname $0`/protostr
 
 files=`ls $protostr | grep -v "unittest"`
 
-./generate_protostr.sh
+./generate_protostr.sh $1
 
 . ./file_list.sh
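Both test scripts now take the Python interpreter as their first positional argument instead of hard-coding python, so CMake can thread ${PYTHON_EXECUTABLE} all the way down to the shell level. A usage sketch, with an illustrative interpreter path standing in for whatever CMake resolves:

    # Sketch: how the scripts are now invoked (interpreter path assumed).
    ./run_tests.sh /usr/bin/python2.7                      # plain run
    ./run_tests.sh /usr/bin/python2.7 ./protobuf_equal     # PROTOBUF_3 branch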
diff --git a/python/setup.py.in b/python/setup.py.in
index d2fb95f27ff2f0673050e699316dde504dbf28f6..b66a42e87c78701e9eb26b1b7dc8f46a95035a76 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,16 +1,11 @@
 from setuptools import setup
 
-INTERNAL_PACKAGE='${PADDLE_INTERNAL_PACKAGE}'
-
 packages=['paddle',
           'paddle.proto',
           'paddle.trainer',
           'paddle.trainer_config_helpers',
           'paddle.utils']
 
-if len(INTERNAL_PACKAGE) != 0:
-    packages.append(INTERNAL_PACKAGE)
-
 setup(name='paddle',
       version='${PADDLE_VERSION}',
       description='Parallel Distributed Deep Learning',
diff --git a/warp-ctc b/warp-ctc
deleted file mode 160000
index bd535c8d44e03c8ebd2d768e06c8c05fdccd11d2..0000000000000000000000000000000000000000
--- a/warp-ctc
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit bd535c8d44e03c8ebd2d768e06c8c05fdccd11d2
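With the .gitmodules entry and the warp-ctc gitlink both deleted, a fresh checkout no longer needs git submodule init/update; the expectation set up by the rest of this patch is that dependencies land under THIRD_PARTY_PATH during the CMake build itself, presumably via ExternalProject rules added elsewhere in this change. A minimal fresh-clone flow under that assumption:

    # Sketch: fresh clone without submodules (THIRD_PARTY_PATH assumed).
    git clone https://github.com/PaddlePaddle/Paddle.git
    cd Paddle && mkdir -p build && cd build
    cmake .. -DTHIRD_PARTY_PATH="$HOME/third_party"
    make -j "$(nproc)"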