diff --git a/.dockerignore b/.dockerignore
deleted file mode 120000
index 3e4e48b0b5fe6b468434d6767749b399319f2da2..0000000000000000000000000000000000000000
--- a/.dockerignore
+++ /dev/null
@@ -1 +0,0 @@
-.gitignore
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..2b2e74053d33cb6d2878fd3d6da48fa344172f63
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,15 @@
+*.DS_Store
+build/
+*.user
+.vscode
+.idea
+.project
+.cproject
+.pydevproject
+Makefile
+.test_env/
+third_party/
+*~
+bazel-*
+
+!build/*.deb
diff --git a/.gitignore b/.gitignore
index 6aae076a49012b032b8fc0f1dc02c2714fb7b4a3..ee7c6ec370cd7c1f3435b41d915e24023c456af7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,7 @@ build/
 .project
 .cproject
 .pydevproject
+.settings/
 Makefile
 .test_env/
 third_party/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a6e45028ebc3f53ea20806f0dd2a7acc820607fe..3402223b044b8950e7772f4d87cc64e5772f8dcd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,12 +2,12 @@
     sha: c25201a00e6b0514370501050cf2a8538ac12270
     hooks:
         - id: remove-crlf
-          files: (?!.*third_party)^.*$
+          files: (?!.*third_party)(?!.*book)^.*$
 -   repo: https://github.com/reyoung/mirrors-yapf.git
     sha: v0.13.2
     hooks:
        - id: yapf
-          files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$  # Bazel BUILD files follow Python syntax.
+          files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     sha: 7539d8bd1a00a3c1bfd34cdb606d3a6372e83469
     hooks:
@@ -15,7 +15,7 @@
         - id: check-merge-conflict
         - id: check-symlinks
         - id: detect-private-key
-          files: (?!.*third_party)^.*$
+          files: (?!.*third_party)(?!.*book)^.*$
         - id: end-of-file-fixer
 -   repo: https://github.com/PaddlePaddle/clang-format-pre-commit-hook.git
     sha: 28c0ea8a67a3e2dbbf4822ef44e85b63a0080a29
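A note on the `files` patterns above: pre-commit documents `files` as a single Python regular expression matched against each candidate path, so excluding both `third_party` and `book` is done with stacked negative lookaheads. Writing it as an alternation with spaces around `|` would fold those literal spaces into each branch, and the pattern would then match no ordinary path at all. A minimal sketch of the difference (the sample paths are invented for illustration):

```python
import re

combined = r"(?!.*third_party)(?!.*book)^.*$"         # form used above
spaced = r"(?!.*third_party)^.*$ | (?!.*book)^.*$"    # naive alternation

for path in ["paddle/utils/Util.cpp", "third_party/gtest/a.cc", "book/ch1.md"]:
    # combined: matches only paddle/utils/Util.cpp
    # spaced: matches none of them, because each branch now requires
    #         a literal space character inside the path
    print path, bool(re.search(combined, path)), bool(re.search(spaced, path))
```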
diff --git a/.travis.yml b/.travis.yml
index 4fb2ca938795bb6a69f7d7991aee9f7386947bf2..5a7f45a748ac7e81f3f90c245bcf2cd84c4e9027 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,22 +4,14 @@ cache:
   - $HOME/third_party
   - $HOME/.ccache
   - $HOME/.cache/pip
-  - $HOME/Library/Caches/Homebrew
 sudo: required
 dist: trusty
 os:
   - linux
-  - osx
 env:
   - JOB=DOCS
   - JOB=BUILD_AND_TEST
   - JOB=PRE_COMMIT
-matrix:
-  exclude:
-    - os: osx
-      env: JOB=DOCS # Only generate documentation in linux.
-    - os: osx
-      env: JOB=PRE_COMMIT # Only check pre-commit hook in linux
 addons:
   apt:
@@ -53,11 +45,10 @@ before_install:
       fi
     fi
   fi
-  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi
   - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
   # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
   # protobuf version.
-  - pip install numpy wheel 'protobuf==3.1' sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker
+  - pip install numpy wheel 'protobuf==3.1' sphinx recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker
 script:
   - paddle/scripts/travis/main.sh
 notifications:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1bb18bb23ae37ec074fc013de0ae41da5b962264..1a59db8c71bf3b1ea472c1ee56a1cd97de42dad8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,17 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
-cmake_minimum_required(VERSION 3.0)
-
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
 set(PROJ_ROOT ${CMAKE_SOURCE_DIR})
 include(system)
 
+if(ANDROID)
+    cmake_minimum_required(VERSION 3.7)
+else()
+    cmake_minimum_required(VERSION 3.0)
+endif()
+
 project(paddle CXX C)
 
 find_package(Sphinx)
-find_package(CUDA QUIET)
+if(NOT CMAKE_CROSSCOMPILING)
+    find_package(CUDA QUIET)
+endif(NOT CMAKE_CROSSCOMPILING)
 find_package(Git REQUIRED)
 find_package(Threads REQUIRED)
@@ -41,7 +47,7 @@ option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF)
 option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF)
 option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler" OFF)
 option(WITH_DOC "Compile PaddlePaddle with documentation" OFF)
-option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF)
+option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF)
 option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF)
 option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF)
@@ -52,6 +58,21 @@ if(NOT CMAKE_BUILD_TYPE)
         FORCE)
 endif()
 
+if(ANDROID)
+    if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
+        message(FATAL_ERROR "Standalone toolchains with Android API level lower than 21 are not supported")
+    endif()
+
+    set(WITH_GPU OFF CACHE STRING
+        "Disable GPU when cross-compiling for Android" FORCE)
+    set(WITH_AVX OFF CACHE STRING
+        "Disable AVX when cross-compiling for Android" FORCE)
+    set(WITH_PYTHON OFF CACHE STRING
+        "Disable PYTHON when cross-compiling for Android" FORCE)
+    set(WITH_RDMA OFF CACHE STRING
+        "Disable RDMA when cross-compiling for Android" FORCE)
+endif(ANDROID)
+
 set(THIRD_PARTY_PATH "${PROJ_ROOT}/third_party" CACHE STRING
     "A path setting third party libraries download & build directories.")
 ########################################################################################
@@ -65,6 +86,7 @@ include(external/python)    # download, build, install python
 include(external/openblas)  # download, build, install openblas
 include(external/swig)      # download, build, install swig
 include(external/warpctc)   # download, build, install warpctc
+include(external/any)       # download linb::any
 
 include(package)            # set paddle packages
 include(cpplint)            # set paddle c++ style
@@ -75,7 +97,6 @@ include(flags)              # set paddle compile flags
 include(cudnn)              # set cudnn libraries
 include(version)            # set PADDLE_VERSION
 include(coveralls)          # set code coverage
-
 include(configure)          # add paddle env configuration
 
 include_directories("${PROJ_ROOT}")
@@ -83,14 +104,21 @@ include_directories("${PROJ_ROOT}/paddle/cuda/include")
 include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto")
 
 set(EXTERNAL_LIBS
-    # have not include gtest here.
     ${GFLAGS_LIBRARIES}
     ${GLOG_LIBRARIES}
     ${CBLAS_LIBRARIES}
     ${PROTOBUF_LIBRARY}
     ${ZLIB_LIBRARIES}
+    ${PYTHON_LIBRARIES}
 )
 
+if(WITH_GPU)
+    list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
+    if(NOT WITH_DSO)
+        list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY})
+    endif(NOT WITH_DSO)
+endif(WITH_GPU)
+
 add_subdirectory(proto)
 add_subdirectory(paddle)
 add_subdirectory(python)
diff --git a/paddle/scripts/docker/Dockerfile b/Dockerfile
similarity index 54%
rename from paddle/scripts/docker/Dockerfile
rename to Dockerfile
index 27d19dc15c8bd64832243d75bf22f0e8adeecbd1..97947adf4501443a7cfedb4a83963f1f1519668c 100644
--- a/paddle/scripts/docker/Dockerfile
+++ b/Dockerfile
@@ -1,45 +1,55 @@
-FROM ubuntu:14.04
+# An image for building paddle binaries
+# Use cuda devel base image for both cpu and gpu environment
+FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu14.04
 MAINTAINER PaddlePaddle Authors
 
-ARG DEBIAN_FRONTEND=noninteractive
 ARG UBUNTU_MIRROR
-RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
+RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
+
+# ENV variables
+ARG WITH_GPU
+ARG WITH_AVX
+ARG WITH_DOC
+ARG WITH_STYLE_CHECK
+
+ENV WOBOQ OFF
+ENV WITH_GPU=${WITH_GPU:-OFF}
+ENV WITH_AVX=${WITH_AVX:-ON}
+ENV WITH_DOC=${WITH_DOC:-OFF}
+ENV WITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF}
+
+ENV HOME /root
+# Add bash enhancements
+COPY ./paddle/scripts/docker/root/ /root/
 
 RUN apt-get update && \
     apt-get install -y git python-pip python-dev openssh-server bison && \
     apt-get install -y wget unzip tar xz-utils bzip2 gzip coreutils && \
     apt-get install -y curl sed grep graphviz libjpeg-dev zlib1g-dev && \
     apt-get install -y python-numpy python-matplotlib gcc g++ gfortran && \
-    apt-get install -y automake && \
+    apt-get install -y automake locales clang-format-3.8 swig && \
     apt-get clean -y
 
+# git credential to skip password typing
+RUN git config --global credential.helper store
+
+# Fix locales to en_US.UTF-8
+RUN localedef -i en_US -f UTF-8 en_US.UTF-8
+
+# FIXME: due to temporary ipykernel dependency issue, specify ipykernel and jupyter
+# versions until jupyter fixes this issue.
 RUN pip install --upgrade pip && \
-    pip install -U "protobuf==3.1.0" && \
+    pip install -U 'protobuf==3.1.0' && \
     pip install -U wheel pillow BeautifulSoup && \
     pip install -U docopt PyYAML sphinx && \
-    pip install -U sphinx_rtd_theme recommonmark jupyter
+    pip install -U sphinx-rtd-theme==0.1.9 recommonmark && \
+    pip install pre-commit 'requests==2.9.2' 'ipykernel==4.6.0' 'jupyter==1.0.0'
 
 RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
     cd cmake-3.4.1 && ./bootstrap && make -j `nproc` && make install && \
     cd .. && rm -rf cmake-3.4.1
 
-ARG BUILD_WOBOQ
-ARG BUILD_AND_INSTALL
-ARG WITH_AVX
-ARG WITH_DOC
-ARG WITH_STYLE_CHECK
-
-ENV BUILD_WOBOQ=${BUILD_WOBOQ:-OFF}
-ENV BUILD_AND_INSTALL=${BUILD_AND_INSTALL:-OFF}
-ENV WITH_GPU=OFF
-ENV WITH_AVX=${WITH_AVX:-ON}
-ENV WITH_DOC=${WITH_DOC:-OFF}
-ENV WITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF}
-
-RUN mkdir /paddle
-COPY . /paddle/
-RUN /paddle/paddle/scripts/docker/build.sh
-VOLUME ["/usr/share/nginx/html/data", "/usr/share/nginx/html/paddle"]
+VOLUME ["/woboq_out"]
 
 # Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service
 RUN mkdir /var/run/sshd
@@ -48,12 +58,5 @@
 RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
 RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
 EXPOSE 22
 
-# Jupyter Notebook directory.
-RUN mkdir /notes/
-WORKDIR "/notes"
-EXPOSE 8888
-
-RUN mkdir -p /opt/bin
-COPY ./paddle/scripts/docker/entrypoint /opt/bin/
-
-CMD ["/opt/bin/entrypoint"]
+# The development image runs the build script by default
+CMD ["bash", "/paddle/paddle/scripts/docker/build.sh"]
diff --git a/README.md b/README.md
index 8a8e15841586ae6a01bb93e94f6074189f556f5a..bcc24b84128df282a2e3f0bc62aafe1ffe172338 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
 
 [![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle)
-[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/)
-[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/cn/index.html)
+[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/develop/doc/)
+[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/doc_cn/)
 [![Coverage Status](https://coveralls.io/repos/github/PaddlePaddle/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/PaddlePaddle/Paddle?branch=develop)
 [![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases)
 [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)
@@ -59,36 +59,36 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl
 the capability of PaddlePaddle to make a huge impact for your product.
 
 ## Installation
-Check out the [Install Guide](http://paddlepaddle.org/doc/build/) to install from
-pre-built packages (**docker image**, **deb package**) or
-directly build on **Linux** and **Mac OS X** from the source code.
+
+It is recommended to check out the
+[Docker installation guide](http://www.paddlepaddle.org/develop/doc/getstarted/build_and_install/docker_install_en.html)
+before looking into the
+[build from source guide](http://www.paddlepaddle.org/develop/doc/getstarted/build_and_install/build_from_source_en.html).
 
 ## Documentation
-Both [English Docs](http://paddlepaddle.org/doc/) and [Chinese Docs](http://paddlepaddle.org/doc_cn/) are provided for our users and developers.
+We provide [English](http://www.paddlepaddle.org/develop/doc/) and
+[Chinese](http://www.paddlepaddle.org/doc_cn/) documentation.
 
-- [Quick Start](http://paddlepaddle.org/doc/demo/quick_start/index_en)
-  You can follow the quick start tutorial to learn how use PaddlePaddle
-  step-by-step.
+- [Deep Learning 101](http://book.paddlepaddle.org/index.en.html)
+
+  You might want to start from this online interactive book that can run in Jupyter Notebook.
+
+- [Distributed Training](http://www.paddlepaddle.org/develop/doc/howto/usage/cluster/cluster_train_en.html)
+
+  You can run distributed training jobs on MPI clusters.
+
+- [Distributed Training on Kubernetes](http://www.paddlepaddle.org/develop/doc/howto/usage/k8s/k8s_en.html)
 
-- [Example and Demo](http://paddlepaddle.org/doc/demo/)
-  We provide five demos, including: image classification, sentiment analysis,
-  sequence to sequence model, recommendation, semantic role labeling.
+  You can also run distributed training jobs on Kubernetes clusters.
 
-- [Distributed Training](http://paddlepaddle.org/doc/cluster)
-  This system supports training deep learning models on multiple machines
-  with data parallelism.
+- [Python API](http://www.paddlepaddle.org/develop/doc/api/index_en.html)
 
-- [Python API](http://paddlepaddle.org/doc/ui/)
-  PaddlePaddle supports using either Python interface or C++ to build your
-  system. We also use SWIG to wrap C++ source code to create a user friendly
-  interface for Python. You can also use SWIG to create interface for your
-  favorite programming language.
+  Our new API enables much shorter programs.
 
-- [How to Contribute](http://paddlepaddle.org/doc/build/contribute_to_paddle.html)
-  We sincerely appreciate your interest and contributions. If you would like to
-  contribute, please read the contribution guide.
+- [How to Contribute](http://www.paddlepaddle.org/develop/doc/howto/dev/contribute_to_paddle_en.html)
 
-- [Source Code Documents](http://paddlepaddle.org/doc/source/)
+  We appreciate your contributions!
 
 ## Ask Questions
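The "new API" the README now points at is the `paddle.v2` package exercised by the demo rewrites later in this patch. As an illustration of the "much shorter programs" claim, the core of a v2 program reduces to a handful of calls; this sketch paraphrases `demo/introduction/api_train_v2.py` added below and is not a complete runnable script on its own:

```python
import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
cost = paddle.layer.mse_cost(input=y_predict, label=y)

parameters = paddle.parameters.create(cost)
trainer = paddle.trainer.SGD(cost=cost,
                             parameters=parameters,
                             update_equation=paddle.optimizer.Momentum(momentum=0))
# trainer.train(reader=..., event_handler=...) then drives the whole job
```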
diff --git a/authors b/authors
index ab4d3118ff1f7e94677c89073c4ea05bf991165e..daac4ec5d8173cba95df9f9b3c69c02b5256f5b2 100644
--- a/authors
+++ b/authors
@@ -29,13 +29,16 @@ Luo, Tao
 Lyu, Qin
 Mao, Hongyue
 Qian, Xiaojun
+Qiao, Longfei
 Qi, Jun
 Qin, Duohao
 Shen, Guolong
 Shi, Guangchuan
 Song, Xiang
+Wang, Helin
 Wang, Jiang
 Wang, Yanfei
+Wang, Yi
 Wang, Yong
 Weng, Renliang
 Xu, Tianbing
diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake
index d319442ef10b38b9edf5844e5540a92c7094c7ce..f74cd4ff8c9c2c52319b18ac37264167b3718eae 100644
--- a/cmake/FindSphinx.cmake
+++ b/cmake/FindSphinx.cmake
@@ -72,7 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
     ${source}
     ${destination}
     COMMENT "Generating sphinx documentation: ${builder}"
-    COMMAND ln -sf ${destination}/index_*.html ${destination}/index.html
+    COMMAND cd ${destination} && ln -sf ./index_*.html index.html
   )
 
   set_property(
diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake
index 235c95f017f2b6ef24195a0210ccafff36b6ed61..b8bf1bb07a1f779354b2c10071264bf41d279f6c 100644
--- a/cmake/cblas.cmake
+++ b/cmake/cblas.cmake
@@ -19,9 +19,9 @@ set(CBLAS_FOUND OFF)
 set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs")
 set(MKL_ROOT ${INTEL_ROOT}/mkl CACHE PATH "Folder contains MKL")
 
-find_path(MKL_INCLUDE_DIR mkl.h PATHS
+find_path(MKL_INC_DIR mkl.h PATHS
   ${MKL_ROOT}/include)
-find_path(MKL_INCLUDE_DIR mkl_lapacke.h PATHS
+find_path(MKL_LAPACK_INC_DIR mkl_lapacke.h PATHS
   ${MKL_ROOT}/include)
 find_library(MKL_CORE_LIB NAMES mkl_core PATHS
   ${MKL_ROOT}/lib
@@ -34,15 +34,19 @@ find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS
   ${MKL_ROOT}/lib
   ${MKL_ROOT}/lib/intel64)
 
-if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64)
+if(MKL_INC_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64)
   set(CBLAS_PROVIDER MKL)
-  set(CBLAS_INC_DIR ${MKL_INCLUDE_DIR})
+  set(CBLAS_INC_DIR ${MKL_INC_DIR})
   set(CBLAS_LIBRARIES ${MKL_INTEL_LP64} ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB})
   add_definitions(-DPADDLE_USE_MKL)
   message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
   set(CBLAS_FOUND ON)
+  if(MKL_LAPACK_INC_DIR)
+    add_definitions(-DPADDLE_USE_LAPACK)
+    message(STATUS "Found lapack in MKL (include: ${MKL_LAPACK_INC_DIR})")
+  endif()
   return()  # return file.
 endif()
@@ -68,13 +72,17 @@ find_library(ATLAS_CBLAS_LIB NAMES cblas libcblas.so.3
   PATHS ${ATLAS_LIB_SEARCH_PATHS})
 find_library(ATLAS_LIB NAMES lapack_atlas liblapack_atlas.so.3
   PATHS ${ATLAS_LIB_SEARCH_PATHS})
 
-if(ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_LIB)
+if(ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_LIB AND NOT CBLAS_FOUND)
   set(CBLAS_PROVIDER ATLAS)
-  set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR})
+  set(CBLAS_INC_DIR ${ATLAS_INC_DIR})
   set(CBLAS_LIBRARIES ${ATLAS_LIB} ${ATLAS_CBLAS_LIB})
   add_definitions(-DPADDLE_USE_ATLAS)
-  message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
+  message(STATUS "Found ATLAS (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
   set(CBLAS_FOUND ON)
+  if(ATLAS_CLAPACK_INC_DIR)
+    add_definitions(-DPADDLE_USE_LAPACK)
+    message(STATUS "Found lapack in ATLAS (include: ${ATLAS_CLAPACK_INC_DIR})")
+  endif()
   return()
 endif()
@@ -103,8 +111,12 @@ if(OPENBLAS_INC_DIR AND OPENBLAS_LIB)
   set(CBLAS_PROVIDER OPENBLAS)
   set(CBLAS_INC_DIR ${OPENBLAS_INC_DIR})
   set(CBLAS_LIBRARIES ${OPENBLAS_LIB})
-  message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
+  message(STATUS "Found OpenBLAS (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
   set(CBLAS_FOUND ON)
+  if(OPENBLAS_LAPACKE_INC_DIR)
+    add_definitions(-DPADDLE_USE_LAPACK)
+    message(STATUS "Found lapack in OpenBLAS (include: ${OPENBLAS_LAPACKE_INC_DIR})")
+  endif()
   return()
 endif()
diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake
index 968d41801d73c4082d2673efe415c1cdd0305b5e..900f59d4cb83bc9ce1893b2d3bd95f5a08b164bb 100644
--- a/cmake/ccache.cmake
+++ b/cmake/ccache.cmake
@@ -1,9 +1,9 @@
 # Use ccache if found ccache program
-find_program(CCACHE_FOUND ccache)
+find_program(CCACHE_PATH ccache)
 
-if(CCACHE_FOUND)
+if(CCACHE_PATH)
     message(STATUS "Ccache is found, use ccache to speed up compile.")
-    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
-    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
-endif(CCACHE_FOUND)
\ No newline at end of file
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_PATH})
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_PATH})
+endif(CCACHE_PATH)
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index 0bb016201dd8ae912ac8ec9f925bc5277fad7aed..5e507e78f74eee885922f502f35e3c15fafb622d 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -32,6 +32,14 @@ if(NOT WITH_PROFILER)
     add_definitions(-DPADDLE_DISABLE_PROFILER)
 endif(NOT WITH_PROFILER)
 
+if(NOT CMAKE_CROSSCOMPILING)
+    if(WITH_AVX AND AVX_FOUND)
+        set(SIMD_FLAG ${AVX_FLAG})
+    elseif(SSE3_FOUND)
+        set(SIMD_FLAG ${SSE3_FLAG})
+    endif()
+endif()
+
 if(NOT WITH_GPU)
     add_definitions(-DPADDLE_ONLY_CPU)
     add_definitions(-DHPPL_STUB_FUNC)
@@ -48,21 +56,12 @@ else()
         message(FATAL_ERROR "Paddle need cudnn to compile")
     endif()
 
-    if(WITH_AVX)
-        set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}")
-    else(WITH_AVX)
-        set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}")
-    endif(WITH_AVX)
+    set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SIMD_FLAG}")
 
     # Include cuda and cudnn
     include_directories(${CUDNN_INCLUDE_DIR})
     include_directories(${CUDA_TOOLKIT_INCLUDE})
 endif(NOT WITH_GPU)
 
-if(WITH_AVX)
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}")
-else(WITH_AVX)
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}")
-endif(WITH_AVX)
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}")
diff --git a/cmake/coveralls.cmake b/cmake/coveralls.cmake
index 9be7643819efdde3f42e4d39b2849ecc17e0d9fb..ca1471cabb57c0795ee193493d2e60bb5bd9e1cc 100644
--- a/cmake/coveralls.cmake
+++ b/cmake/coveralls.cmake
@@ -61,7 +61,7 @@ function(code_coverage _COVERAGE_SRCS _COVERALLS_UPLOAD _CMAKE_SCRIPT_PATH)
     endif()
 endfunction()
 
-if(ON_COVERALLS)
+if(WITH_COVERAGE)
     set(CMAKE_BUILD_TYPE "Debug")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
diff --git a/cmake/coverallsGcovJsons.cmake b/cmake/coverallsGcovJsons.cmake
index ae3530c3a0eeb79ddbcbf4f2e99be75aa7968a2f..4641184fcf5273b884524d9b9444209ffb65e000 100644
--- a/cmake/coverallsGcovJsons.cmake
+++ b/cmake/coverallsGcovJsons.cmake
@@ -110,14 +110,13 @@ endmacro()
 
 # Get the coverage data.
 file(GLOB_RECURSE GCDA_FILES "${COV_PATH}" "*.gcda")
-message("GCDA files:")
+message("Process GCDA files:")
+message("===============================")
 
 # Get a list of all the object directories needed by gcov
 # (The directories the .gcda files and .o files are found in)
 # and run gcov on those.
 foreach(GCDA ${GCDA_FILES})
-    message("Process: ${GCDA}")
-    message("------------------------------------------------------------------------------")
     get_filename_component(GCDA_DIR ${GCDA} PATH)
 
     #
@@ -135,7 +134,7 @@
     # If -p is not specified then the file is named only "the_file.c.gcov"
     #
     execute_process(
-        COMMAND ${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA}
+        COMMAND ${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA} OUTPUT_QUIET
         WORKING_DIRECTORY ${GCDA_DIR}
     )
 endforeach()
@@ -383,7 +382,6 @@ foreach(NOT_COVERED_SRC ${COVERAGE_SRCS_REMAINING})
     set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]")
 
     # Generate the final JSON for this file.
-    message("Generate JSON for non-gcov file: ${NOT_COVERED_SRC}...")
     string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON)
     set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ")
 endforeach()
diff --git a/cmake/cudnn.cmake b/cmake/cudnn.cmake
index e5b59be19369d3ba3e852624426b95ae365e7357..af9be86961833dcd62371227165d411a3b61d79e 100644
--- a/cmake/cudnn.cmake
+++ b/cmake/cudnn.cmake
@@ -1,3 +1,7 @@
+if(NOT WITH_GPU)
+    return()
+endif()
+
 set(CUDNN_ROOT "" CACHE PATH "CUDNN ROOT")
 find_path(CUDNN_INCLUDE_DIR cudnn.h PATHS
     ${CUDNN_ROOT} ${CUDNN_ROOT}/include
@@ -11,6 +15,7 @@ list(APPEND CUDNN_CHECK_LIBRARY_DIRS
     ${CUDNN_ROOT}
     ${CUDNN_ROOT}/lib64
     ${CUDNN_ROOT}/lib
+    ${CUDNN_ROOT}/lib/x86_64-linux-gnu
     $ENV{CUDNN_ROOT}
     $ENV{CUDNN_ROOT}/lib64
     $ENV{CUDNN_ROOT}/lib
diff --git a/cmake/external/any.cmake b/cmake/external/any.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..8116f235d535917c03deb646ff4ec083a0cdadc7
--- /dev/null
+++ b/cmake/external/any.cmake
@@ -0,0 +1,20 @@
+INCLUDE(ExternalProject)
+
+SET(ANY_SOURCE_DIR ${THIRD_PARTY_PATH}/any)
+
+INCLUDE_DIRECTORIES(${ANY_SOURCE_DIR}/src/linb_any)
+
+ExternalProject_Add(
+    linb_any
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    GIT_REPOSITORY    "https://github.com/thelink2012/any.git"
+    GIT_TAG           "8fef1e93710a0edf8d7658999e284a1142c4c020"
+    PREFIX            ${ANY_SOURCE_DIR}
+    UPDATE_COMMAND    ""
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND     ""
+    INSTALL_COMMAND   ""
+    TEST_COMMAND      ""
+)
+
+add_definitions(-DANY_IMPL_ANY_CAST_MOVEABLE)
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
index 2a49d76eb30f592a28746f5897b14b7dd319d784..0afb3ab9af48046af01f03838eefa0bd2fcb2821 100644
--- a/cmake/external/gflags.cmake
+++ b/cmake/external/gflags.cmake
@@ -31,9 +31,17 @@ ExternalProject_Add(
     GIT_REPOSITORY  "https://github.com/gflags/gflags.git"
     PREFIX          ${GFLAGS_SOURCES_DIR}
     UPDATE_COMMAND  ""
+    CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+    CMAKE_ARGS      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+    CMAKE_ARGS      -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    CMAKE_ARGS      -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
    CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
    CMAKE_ARGS      -DCMAKE_POSITION_INDEPENDENT_CODE=ON
    CMAKE_ARGS      -DBUILD_TESTING=OFF
+    CMAKE_ARGS      -DCMAKE_BUILD_TYPE=Release
+    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
+                     -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                     -DCMAKE_BUILD_TYPE:STRING=Release
 )
 
 LIST(APPEND external_project_dependencies gflags)
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
index ab105611c812a4f4b642ac5b1213fdfe93fab97d..4a9e2ecc6bbe74c5856a55fb0c982777d7ac25b7 100644
--- a/cmake/external/glog.cmake
+++ b/cmake/external/glog.cmake
@@ -33,11 +33,19 @@ ExternalProject_Add(
     GIT_REPOSITORY  "https://github.com/google/glog.git"
     PREFIX          ${GLOG_SOURCES_DIR}
     UPDATE_COMMAND  ""
+    CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+    CMAKE_ARGS      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+    CMAKE_ARGS      -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    CMAKE_ARGS      -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
    CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
    CMAKE_ARGS      -DCMAKE_POSITION_INDEPENDENT_CODE=ON
    CMAKE_ARGS      -DWITH_GFLAGS=ON
    CMAKE_ARGS      -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
    CMAKE_ARGS      -DBUILD_TESTING=OFF
+    CMAKE_ARGS      -DCMAKE_BUILD_TYPE=Release
+    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
+                     -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                     -DCMAKE_BUILD_TYPE:STRING=Release
 )
 
 LIST(APPEND external_project_dependencies glog)
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
index 11d829a9e2f239848803130505c9862695b25029..49c7d71443cda700a14af6be65ff6658eec7229f 100644
--- a/cmake/external/gtest.cmake
+++ b/cmake/external/gtest.cmake
@@ -41,11 +41,19 @@ IF(WITH_TESTING)
         GIT_TAG         "release-1.8.0"
         PREFIX          ${GTEST_SOURCES_DIR}
         UPDATE_COMMAND  ""
-        CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
+        CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+        CMAKE_ARGS      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+        CMAKE_ARGS      -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+        CMAKE_ARGS      -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+        CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
         CMAKE_ARGS      -DCMAKE_POSITION_INDEPENDENT_CODE=ON
         CMAKE_ARGS      -DBUILD_GMOCK=ON
         CMAKE_ARGS      -Dgtest_disable_pthreads=ON
         CMAKE_ARGS      -Dgtest_force_shared_crt=ON
+        CMAKE_ARGS      -DCMAKE_BUILD_TYPE=Release
+        CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
+                         -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                         -DCMAKE_BUILD_TYPE:STRING=Release
     )
     LIST(APPEND external_project_dependencies gtest)
 ENDIF(WITH_TESTING)
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 29d17691db9f4575bae4372c61a0e1964e163fc9..92ea23c7633e974fd09251f967965364b1928307 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -29,7 +29,24 @@ IF(NOT ${CBLAS_FOUND})
 
     IF(CMAKE_COMPILER_IS_GNUCC)
         ENABLE_LANGUAGE(Fortran)
-        LIST(APPEND CBLAS_LIBRARIES gfortran pthread)
+        if (NOT CMAKE_Fortran_COMPILER_VERSION)
+            # cmake < 3.4 cannot get CMAKE_Fortran_COMPILER_VERSION directly.
+            execute_process(COMMAND ${CMAKE_Fortran_COMPILER} -dumpversion
+                            OUTPUT_VARIABLE CMAKE_Fortran_COMPILER_VERSION)
+        endif()
+        string(REGEX MATCHALL "[0-9]+" Fortran_VERSION ${CMAKE_Fortran_COMPILER_VERSION})
+        list(GET Fortran_VERSION 0 Fortran_MAJOR)
+        list(GET Fortran_VERSION 1 Fortran_MINOR)
+        find_library(GFORTRAN_LIBRARY NAMES gfortran PATHS
+            /lib
+            /usr/lib
+            /usr/lib/gcc/x86_64-linux-gnu/${Fortran_MAJOR}.${Fortran_MINOR}/
+            /usr/lib/gcc/x86_64-linux-gnu/${Fortran_MAJOR}/)
+        if (NOT GFORTRAN_LIBRARY)
+            message(FATAL_ERROR "Cannot find the gfortran library required by OpenBLAS")
+        endif()
+        find_package(Threads REQUIRED)
+        LIST(APPEND CBLAS_LIBRARIES ${GFORTRAN_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
     ENDIF(CMAKE_COMPILER_IS_GNUCC)
 
     IF(NOT CMAKE_Fortran_COMPILER)
@@ -37,6 +54,8 @@
         "you need to set gfortran compiler: cmake .. -DCMAKE_Fortran_COMPILER=...")
     ENDIF(NOT CMAKE_Fortran_COMPILER)
 
+    ADD_DEFINITIONS(-DPADDLE_USE_LAPACK)
+
     ExternalProject_Add(
         openblas
         ${EXTERNAL_PROJECT_LOG_ARGS}
@@ -45,7 +64,7 @@
         PREFIX              ${CBLAS_SOURCES_DIR}
         INSTALL_DIR         ${CBLAS_INSTALL_DIR}
         BUILD_IN_SOURCE     1
-        BUILD_COMMAND       ${CMAKE_MAKE_PROGRAM} FC=${CMAKE_Fortran_COMPILER} CC=${CMAKE_C_COMPILER} HOSTCC=${CMAKE_C_COMPILER} NO_SHARED=1 libs netlib
+        BUILD_COMMAND       ${CMAKE_MAKE_PROGRAM} FC=${CMAKE_Fortran_COMPILER} CC=${CMAKE_C_COMPILER} HOSTCC=${CMAKE_C_COMPILER} DYNAMIC_ARCH=1 NO_SHARED=1 libs netlib
         INSTALL_COMMAND     ${CMAKE_MAKE_PROGRAM} install NO_SHARED=1 PREFIX=<INSTALL_DIR>
         UPDATE_COMMAND      ""
         CONFIGURE_COMMAND   ""
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index 84f459033f06f89d3b150317793c7e62274468b2..2df042d226af8308d00f7870e7d2de0eacfdf07e 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -14,46 +14,67 @@
 
 INCLUDE(ExternalProject)
 
-SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf)
-SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf)
-SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE)
+set(PROTOBUF_VERSION 3.1)
+FIND_PACKAGE(Protobuf ${PROTOBUF_VERSION})
 
-INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})
+IF(PROTOBUF_FOUND)
+    EXEC_PROGRAM(${PROTOBUF_PROTOC_EXECUTABLE} ARGS --version OUTPUT_VARIABLE PROTOBUF_VERSION)
+    STRING(REGEX MATCH "[0-9]+.[0-9]+" PROTOBUF_VERSION "${PROTOBUF_VERSION}")
+    IF (${PROTOBUF_VERSION} VERSION_LESS "3.1.0")
+        SET(PROTOBUF_FOUND OFF)
+    ENDIF()
+ENDIF(PROTOBUF_FOUND)
+
+IF(NOT PROTOBUF_FOUND)
+    SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf)
+    SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf)
+    SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE)
+
+    IF(WIN32)
+        SET(PROTOBUF_LITE_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE)
+        SET(PROTOBUF_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE)
+        SET(PROTOBUF_PROTOC_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE)
+        SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE)
+    ELSE(WIN32)
+        SET(PROTOBUF_LITE_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE)
+        SET(PROTOBUF_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE)
+        SET(PROTOBUF_PROTOC_LIBRARY
+            "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protoc library." FORCE)
+        SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE)
+    ENDIF(WIN32)
 
-IF(WIN32)
-    SET(PROTOBUF_LITE_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE)
-    SET(PROTOBUF_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE)
-    SET(PROTOBUF_PROTOC_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE)
-    SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE)
-ELSE(WIN32)
-    SET(PROTOBUF_LITE_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE)
-    SET(PROTOBUF_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE)
-    SET(PROTOBUF_PROTOC_LIBRARY
-        "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protoc library." FORCE)
-    SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE)
-ENDIF(WIN32)
-
-ExternalProject_Add(
-    protobuf
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    PREFIX          ${PROTOBUF_SOURCES_DIR}
-    UPDATE_COMMAND  ""
-    DEPENDS         zlib
-    GIT_REPOSITORY  "https://github.com/google/protobuf.git"
-    GIT_TAG         "9f75c5aa851cd877fb0d93ccc31b8567a6706546"
-    CONFIGURE_COMMAND
-        ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake
-        -Dprotobuf_BUILD_TESTS=OFF
-        -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
-        -DCMAKE_POSITION_INDEPENDENT_CODE=ON
-        -DCMAKE_BUILD_TYPE=Release
-        -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
-        -DCMAKE_INSTALL_LIBDIR=lib
-)
-
-LIST(APPEND external_project_dependencies protobuf)
+    ExternalProject_Add(
+        protobuf
+        ${EXTERNAL_PROJECT_LOG_ARGS}
+        PREFIX          ${PROTOBUF_SOURCES_DIR}
+        UPDATE_COMMAND  ""
+        DEPENDS         zlib
+        GIT_REPOSITORY  "https://github.com/google/protobuf.git"
+        GIT_TAG         "9f75c5aa851cd877fb0d93ccc31b8567a6706546"
+        CONFIGURE_COMMAND
+            ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake
+            -Dprotobuf_BUILD_TESTS=OFF
+            -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
+            -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+            -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+            -DCMAKE_BUILD_TYPE=Release
+            -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
+            -DCMAKE_INSTALL_LIBDIR=lib
+        CMAKE_CACHE_ARGS
+            -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
+            -DCMAKE_BUILD_TYPE:STRING=Release
+            -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
+            -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+            -DZLIB_ROOT:STRING=${ZLIB_ROOT}
+    )
+
+    LIST(APPEND external_project_dependencies protobuf)
+ENDIF(NOT PROTOBUF_FOUND)
+
+INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})
diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake
index 0accf1a8dd83560324716f0f4936be56dd7a9f1b..9fd3afd0998b38c18b4490e6fb1c6fe0222ed142 100644
--- a/cmake/external/python.cmake
+++ b/cmake/external/python.cmake
@@ -219,5 +219,9 @@ ELSE(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
 ENDIF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
 
-INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
-INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
+IF(WITH_PYTHON)
+    INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
+    INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
+ELSE()
+    SET(PYTHON_LIBRARIES "")
+ENDIF()
diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index 172c318b35d611d0432b78f2a18eb58a7d272b90..293070c3cfcc1196001f64469f3254289b0de792 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -50,12 +50,19 @@ ExternalProject_Add(
     UPDATE_COMMAND  ""
     CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
     CMAKE_ARGS      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+    CMAKE_ARGS      -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    CMAKE_ARGS      -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
     CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
     CMAKE_ARGS      -DWITH_GPU=${WITH_GPU}
     CMAKE_ARGS      -DWITH_OMP=${USE_OMP}
     CMAKE_ARGS      -DWITH_TORCH=OFF
-    CMAKE_ARGS      -DCMAKE_DISABLE_FIND_PACKAGE_Torch=TRUE
+    CMAKE_ARGS      -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
     CMAKE_ARGS      -DBUILD_SHARED=ON
+    CMAKE_ARGS      -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+    CMAKE_ARGS      -DCMAKE_BUILD_TYPE=Release
+    CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=Release
+                     -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                     -DCMAKE_INSTALL_PREFIX:PATH=${WARPCTC_INSTALL_DIR}
 )
 
 LIST(APPEND external_project_dependencies warpctc)
diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake
index 47fa8817fb64fb8fd718e2892ad5bae7bbe956eb..45ca5542b7dc30216b45487782f849b93c5f8fca 100644
--- a/cmake/external/zlib.cmake
+++ b/cmake/external/zlib.cmake
@@ -22,7 +22,7 @@ SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE)
 
 IF(WIN32)
     SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE)
 ELSE(WIN32)
-    set(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
+    SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
 ENDIF(WIN32)
 
 INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})
@@ -34,10 +34,18 @@ ExternalProject_Add(
     GIT_TAG         "v1.2.8"
     PREFIX          ${ZLIB_SOURCES_DIR}
     UPDATE_COMMAND  ""
+    CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+    CMAKE_ARGS      -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+    CMAKE_ARGS      -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    CMAKE_ARGS      -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
     CMAKE_ARGS      -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
     CMAKE_ARGS      -DBUILD_SHARED_LIBS=OFF
     CMAKE_ARGS      -DCMAKE_POSITION_INDEPENDENT_CODE=ON
     CMAKE_ARGS      -DCMAKE_MACOSX_RPATH=ON
+    CMAKE_ARGS      -DCMAKE_BUILD_TYPE=Release
+    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR}
+                     -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                     -DCMAKE_BUILD_TYPE:STRING=Release
 )
 
 LIST(APPEND external_project_dependencies zlib)
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index b76852fc6c50e80633c8294fb2724b83f15293a7..7eb92efcb00fa18461e61e0508b485c13ef23a1f 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -2,6 +2,7 @@
 include(CheckCXXCompilerFlag)
 include(CheckCCompilerFlag)
 include(CheckCXXSymbolExists)
+include(CheckTypeSize)
 
 function(CheckCompilerCXX11Flag)
     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
@@ -25,7 +26,7 @@
 endfunction()
 
 CheckCompilerCXX11Flag()
-LIST(APPEND CMAKE_CXX_FLAGS -std=c++11)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 
 # safe_set_flag
 #
@@ -83,6 +84,17 @@
     endif()
 endif()
 
+SET(CMAKE_EXTRA_INCLUDE_FILES "pthread.h")
+CHECK_TYPE_SIZE(pthread_spinlock_t SPINLOCK_FOUND)
+CHECK_TYPE_SIZE(pthread_barrier_t BARRIER_FOUND)
+if(SPINLOCK_FOUND)
+    add_definitions(-DPADDLE_USE_PTHREAD_SPINLOCK)
+endif(SPINLOCK_FOUND)
+if(BARRIER_FOUND)
+    add_definitions(-DPADDLE_USE_PTHREAD_BARRIER)
+endif(BARRIER_FOUND)
+SET(CMAKE_EXTRA_INCLUDE_FILES "")
+
 # Common flags. the compiler flag used for C/C++ sources whenever release or debug
 # Do not care if this flag is support for gcc.
 set(COMMON_FLAGS
diff --git a/cmake/simd.cmake b/cmake/simd.cmake
index d380c996dfa95f0caa2b9cd9daa0ac9141e51fe0..46035a908b588861607a25d3a21cf34b7b6fd4b8 100644
--- a/cmake/simd.cmake
+++ b/cmake/simd.cmake
@@ -2,6 +2,7 @@
 # so that PaddlePaddle can unleash the vectorization power of muticore.
 
 INCLUDE(CheckCXXSourceRuns)
+INCLUDE(CheckCXXSourceCompiles)
 
 IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
     set(MMX_FLAG "-mmmx")
@@ -17,6 +18,8 @@ ELSEIF(MSVC)
     SET(AVX2_FLAG "/arch:AVX2")
 ENDIF()
 
+set(CMAKE_REQUIRED_FLAGS_RETAINED ${CMAKE_REQUIRED_FLAGS})
+
 # Check MMX
 set(CMAKE_REQUIRED_FLAGS ${MMX_FLAG})
 CHECK_CXX_SOURCE_RUNS("
@@ -73,4 +76,5 @@ int main()
     return 0;
 }" AVX2_FOUND)
 
+set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_RETAINED})
 mark_as_advanced(MMX_FOUND SSE2_FOUND SSE3_FOUND AVX_FOUND AVX2_FOUND)
diff --git a/cmake/system.cmake b/cmake/system.cmake
index 8da17744a431a4345c83475744d6872a80f2da57..75a9d8fc25674e1dd0f5b73cd0ccde48204f63aa 100644
--- a/cmake/system.cmake
+++ b/cmake/system.cmake
@@ -72,6 +72,12 @@ MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES)
 MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}")
 MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores")
 
+IF(DEFINED CMAKE_SYSTEM_NAME)
+    IF(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
+        SET(ANDROID TRUE)
+    ENDIF()
+ENDIF()
+
 # external dependencies log output
 SET(EXTERNAL_PROJECT_LOG_ARGS
     LOG_DOWNLOAD    0     # Wrap download in script to log output
diff --git a/cmake/util.cmake b/cmake/util.cmake
index 24ad5c815ca20d9b6b317b1be4d2dc93a9e06fba..099a85809d93e01772bf8c5c329fd9055ee4f054 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -71,21 +71,10 @@ function(link_paddle_exe TARGET_NAME)
         generate_rdma_links()
     endif()
 
-    if(WITH_METRIC)
-        if(WITH_GPU)
-            set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu)
-        else()
-            set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric_cpu)
-        endif()
-    else()
-        set(METRIC_LIBS "")
-    endif()
-
     target_circle_link_libraries(${TARGET_NAME}
         ARCHIVE_START
         paddle_gserver
         paddle_function
-        ${METRIC_LIBS}
         ARCHIVE_END
         paddle_pserver
         paddle_trainer_lib
@@ -95,32 +84,15 @@
         paddle_parameter
         paddle_proto
         paddle_cuda
-        ${METRIC_LIBS}
         ${EXTERNAL_LIBS}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CMAKE_DL_LIBS}
         ${RDMA_LD_FLAGS}
        ${RDMA_LIBS})
 
-    if(WITH_PYTHON)
-        target_link_libraries(${TARGET_NAME}
-            ${PYTHON_LIBRARIES} util)
-    endif()
-
-    if(WITH_GPU)
-        target_link_libraries(${TARGET_NAME} ${CUDA_CUDART_LIBRARY})
-        if(NOT WITH_DSO OR WITH_METRIC)
-            target_link_libraries(${TARGET_NAME}
-                ${CUDNN_LIBRARY}
-                ${CUDA_curand_LIBRARY})
-            CUDA_ADD_CUBLAS_TO_TARGET(${TARGET_NAME})
-        endif()
-
-        check_library_exists(rt clock_gettime "time.h" HAVE_CLOCK_GETTIME )
-        if(HAVE_CLOCK_GETTIME)
-            target_link_libraries(${TARGET_NAME} rt)
-        endif()
-    endif()
+    if(ANDROID)
+        target_link_libraries(${TARGET_NAME} log)
+    endif(ANDROID)
 
     add_dependencies(${TARGET_NAME} ${external_project_dependencies})
 endfunction()
diff --git a/demo/image_classification/api_v2_resnet.py b/demo/image_classification/api_v2_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..19d20540780becf504973a23b50445d4b65dc2ef
--- /dev/null
+++ b/demo/image_classification/api_v2_resnet.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2 as paddle
+
+__all__ = ['resnet_cifar10']
+
+
+def conv_bn_layer(input,
+                  ch_out,
+                  filter_size,
+                  stride,
+                  padding,
+                  active_type=paddle.activation.Relu(),
+                  ch_in=None):
+    tmp = paddle.layer.img_conv(
+        input=input,
+        filter_size=filter_size,
+        num_channels=ch_in,
+        num_filters=ch_out,
+        stride=stride,
+        padding=padding,
+        act=paddle.activation.Linear(),
+        bias_attr=False)
+    return paddle.layer.batch_norm(input=tmp, act=active_type)
+
+
+def shortcut(ipt, n_in, n_out, stride):
+    if n_in != n_out:
+        return conv_bn_layer(ipt, n_out, 1, stride, 0,
+                             paddle.activation.Linear())
+    else:
+        return ipt
+
+
+def basicblock(ipt, ch_out, stride):
+    ch_in = ch_out * 2
+    tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1)
+    tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, paddle.activation.Linear())
+    short = shortcut(ipt, ch_in, ch_out, stride)
+    return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu())
+
+
+def layer_warp(block_func, ipt, features, count, stride):
+    tmp = block_func(ipt, features, stride)
+    for i in range(1, count):
+        tmp = block_func(tmp, features, 1)
+    return tmp
+
+
+def resnet_cifar10(ipt, depth=32):
+    # depth should be one of 20, 32, 44, 56, 110, 1202
+    assert (depth - 2) % 6 == 0
+    n = (depth - 2) / 6
+    nStages = {16, 64, 128}
+    conv1 = conv_bn_layer(
+        ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)
+    res1 = layer_warp(basicblock, conv1, 16, n, 1)
+    res2 = layer_warp(basicblock, res1, 32, n, 2)
+    res3 = layer_warp(basicblock, res2, 64, n, 2)
+    pool = paddle.layer.img_pool(
+        input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())
+    return pool
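The training script that follows selects its topology by uncommenting one line, so `resnet_cifar10` plugs in the same way as the VGG network. A short sketch of the swap, mirroring the commented-out option in `api_v2_train.py` below:

```python
import paddle.v2 as paddle
from api_v2_resnet import resnet_cifar10

paddle.init(use_gpu=False, trainer_count=1)
image = paddle.layer.data(
    name="image", type=paddle.data_type.dense_vector(3 * 32 * 32))
# depth must satisfy (depth - 2) % 6 == 0, e.g. 20, 32, 44, 56, 110, 1202
net = resnet_cifar10(image, depth=32)
```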
diff --git a/demo/image_classification/api_v2_train.py b/demo/image_classification/api_v2_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..53cffa6fb4e8b2e19725f4f44bf7b9ffffb25232
--- /dev/null
+++ b/demo/image_classification/api_v2_train.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import sys
+
+import paddle.v2 as paddle
+
+from api_v2_vgg import vgg_bn_drop
+
+
+def main():
+    datadim = 3 * 32 * 32
+    classdim = 10
+
+    # PaddlePaddle init
+    paddle.init(use_gpu=False, trainer_count=1)
+
+    image = paddle.layer.data(
+        name="image", type=paddle.data_type.dense_vector(datadim))
+
+    # Add neural network config
+    # option 1. resnet
+    # net = resnet_cifar10(image, depth=32)
+    # option 2. vgg
+    net = vgg_bn_drop(image)
+
+    out = paddle.layer.fc(input=net,
+                          size=classdim,
+                          act=paddle.activation.Softmax())
+
+    lbl = paddle.layer.data(
+        name="label", type=paddle.data_type.integer_value(classdim))
+    cost = paddle.layer.classification_cost(input=out, label=lbl)
+
+    # Create parameters
+    parameters = paddle.parameters.create(cost)
+
+    # Create optimizer
+    momentum_optimizer = paddle.optimizer.Momentum(
+        momentum=0.9,
+        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
+        learning_rate=0.1 / 128.0,
+        learning_rate_decay_a=0.1,
+        learning_rate_decay_b=50000 * 100,
+        learning_rate_schedule='discexp',
+        batch_size=128)
+
+    # End batch and end pass event handler
+    def event_handler(event):
+        if isinstance(event, paddle.event.EndIteration):
+            if event.batch_id % 100 == 0:
+                print "\nPass %d, Batch %d, Cost %f, %s" % (
+                    event.pass_id, event.batch_id, event.cost, event.metrics)
+            else:
+                sys.stdout.write('.')
+                sys.stdout.flush()
+        if isinstance(event, paddle.event.EndPass):
+            result = trainer.test(
+                reader=paddle.batch(
+                    paddle.dataset.cifar.test10(), batch_size=128),
+                feeding={'image': 0,
+                         'label': 1})
+            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
+
+    # Create trainer
+    trainer = paddle.trainer.SGD(cost=cost,
+                                 parameters=parameters,
+                                 update_equation=momentum_optimizer)
+    trainer.train(
+        reader=paddle.batch(
+            paddle.reader.shuffle(
+                paddle.dataset.cifar.train10(), buf_size=50000),
+            batch_size=128),
+        num_passes=5,
+        event_handler=event_handler,
+        feeding={'image': 0,
+                 'label': 1})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/demo/image_classification/api_v2_vgg.py b/demo/image_classification/api_v2_vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e0e6b93adde30425f17aa9cd07542275f4fec37
--- /dev/null
+++ b/demo/image_classification/api_v2_vgg.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2 as paddle
+
+__all__ = ['vgg_bn_drop']
+
+
+def vgg_bn_drop(input):
+    def conv_block(ipt, num_filter, groups, dropouts, num_channels=None):
+        return paddle.networks.img_conv_group(
+            input=ipt,
+            num_channels=num_channels,
+            pool_size=2,
+            pool_stride=2,
+            conv_num_filter=[num_filter] * groups,
+            conv_filter_size=3,
+            conv_act=paddle.activation.Relu(),
+            conv_with_batchnorm=True,
+            conv_batchnorm_drop_rate=dropouts,
+            pool_type=paddle.pooling.Max())
+
+    conv1 = conv_block(input, 64, 2, [0.3, 0], 3)
+    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
+    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
+    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
+    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
+
+    drop = paddle.layer.dropout(input=conv5, dropout_rate=0.5)
+    fc1 = paddle.layer.fc(input=drop, size=512, act=paddle.activation.Linear())
+    bn = paddle.layer.batch_norm(
+        input=fc1,
+        act=paddle.activation.Relu(),
+        layer_attr=paddle.attr.Extra(drop_rate=0.5))
+    fc2 = paddle.layer.fc(input=bn, size=512, act=paddle.activation.Linear())
+    return fc2
diff --git a/demo/image_classification/prediction.py b/demo/image_classification/prediction.py
index 9a86aafcb2fa4d4354d1dd9443c1b73ddcda980b..49c0ff600c40e0222093ff0a8a2f7e8e38ccba29 100755
--- a/demo/image_classification/prediction.py
+++ b/demo/image_classification/prediction.py
@@ -126,7 +126,7 @@ class ImageClassifier():
         # For oversampling, average predictions across crops.
         # If not, the shape of output[name]: (1, class_number),
         # the mean is also applicable.
-        return output[output_layer].mean(0)
+        return output[output_layer]['value'].mean(0)
 
     def predict(self, image=None, output_layer=None):
         assert isinstance(image, basestring)
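The one-line change above reflects that inference results are now returned as a dict per output layer, with the actual array stored under a `'value'` key, instead of the bare array. A small sketch of what the averaging over crops then operates on; `fake_output` and the layer name are invented here purely to show the shapes:

```python
import numpy as np

# each layer's result is now {'value': ndarray}; with 10-crop
# oversampling the array has shape (num_crops, class_number)
fake_output = {'__fc_layer_0__': {'value': np.random.rand(10, 102)}}

probs = fake_output['__fc_layer_0__']['value'].mean(0)  # average across crops
assert probs.shape == (102, )
```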
diff --git a/demo/introduction/api_train_v2.py b/demo/introduction/api_train_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ba971b3688ce3dec078998df2c0b183a4e449f8
--- /dev/null
+++ b/demo/introduction/api_train_v2.py
@@ -0,0 +1,58 @@
+import paddle.v2 as paddle
+import paddle.v2.dataset.uci_housing as uci_housing
+
+
+def main():
+    # init
+    paddle.init(use_gpu=False, trainer_count=1)
+
+    # network config
+    x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
+    y_predict = paddle.layer.fc(input=x,
+                                param_attr=paddle.attr.Param(name='w'),
+                                size=1,
+                                act=paddle.activation.Linear(),
+                                bias_attr=paddle.attr.Param(name='b'))
+    y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
+    cost = paddle.layer.mse_cost(input=y_predict, label=y)
+
+    # create parameters
+    parameters = paddle.parameters.create(cost)
+
+    # create optimizer
+    optimizer = paddle.optimizer.Momentum(momentum=0)
+
+    trainer = paddle.trainer.SGD(cost=cost,
+                                 parameters=parameters,
+                                 update_equation=optimizer)
+
+    # event_handler to print training and testing info
+    def event_handler(event):
+        if isinstance(event, paddle.event.EndIteration):
+            if event.batch_id % 100 == 0:
+                print "Pass %d, Batch %d, Cost %f" % (
+                    event.pass_id, event.batch_id, event.cost)
+
+        if isinstance(event, paddle.event.EndPass):
+            if (event.pass_id + 1) % 10 == 0:
+                result = trainer.test(
+                    reader=paddle.batch(
+                        uci_housing.test(), batch_size=2),
+                    feeding={'x': 0,
+                             'y': 1})
+                print "Test %d, %.2f" % (event.pass_id, result.cost)
+
+    # training
+    trainer.train(
+        reader=paddle.batch(
+            paddle.reader.shuffle(
+                uci_housing.train(), buf_size=500),
+            batch_size=2),
+        feeding={'x': 0,
+                 'y': 1},
+        event_handler=event_handler,
+        num_passes=30)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/demo/introduction/trainer_config.py b/demo/introduction/trainer_config.py
index ecafe955f9e5c1062168d5d7b6b4c639d6e72a99..651dfaa4b7b4873810a0b393655541a62d1a311b 100644
--- a/demo/introduction/trainer_config.py
+++ b/demo/introduction/trainer_config.py
@@ -34,5 +34,5 @@ y_predict = fc_layer(
     size=1,
     act=LinearActivation(),
     bias_attr=ParamAttr(name='b'))
-cost = regression_cost(input=y_predict, label=y)
+cost = mse_cost(input=y_predict, label=y)
 outputs(cost)
diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore
index 8bd9837523ccf98e6e72d5b82934b7b104816217..7e61d5e3a0cabd46d4185454d46610ac2ee2e63f 100644
--- a/demo/mnist/.gitignore
+++ b/demo/mnist/.gitignore
@@ -5,3 +5,6 @@ plot.png
 train.log
 *pyc
 .ipynb_checkpoints
+params.pkl
+params.tar
+params.tar.gz
diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py
index 6fc01ce58be57c77144c6558d039430b22d3a746..6b95a88042a13a280bcb80f753b3887fcef37296 100644
--- a/demo/mnist/api_train_v2.py
+++ b/demo/mnist/api_train_v2.py
@@ -1,14 +1,58 @@
-import numpy
 import paddle.v2 as paddle
+import gzip
 
-import mnist_util
 
+def softmax_regression(img):
+    predict = paddle.layer.fc(input=img,
+                              size=10,
+                              act=paddle.activation.Softmax())
+    return predict
 
-def train_reader():
-    train_file = './data/raw_data/train'
-    generator = mnist_util.read_from_mnist(train_file)
-    for item in generator:
-        yield item
+
+def multilayer_perceptron(img):
+    # The first fully-connected layer
+    hidden1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu())
+    # The second fully-connected layer and the according activation function
+    hidden2 = paddle.layer.fc(input=hidden1,
+                              size=64,
+                              act=paddle.activation.Relu())
+    # The third fully-connected layer, note that the hidden size should be 10,
+    # which is the number of unique digits
+    predict = paddle.layer.fc(input=hidden2,
+                              size=10,
+                              act=paddle.activation.Softmax())
+    return predict
+
+
+def convolutional_neural_network(img):
+    # first conv layer
+    conv_pool_1 = paddle.networks.simple_img_conv_pool(
+        input=img,
+        filter_size=5,
+        num_filters=20,
+        num_channel=1,
+        pool_size=2,
+        pool_stride=2,
+        act=paddle.activation.Tanh())
+    # second conv layer
+    conv_pool_2 = paddle.networks.simple_img_conv_pool(
+        input=conv_pool_1,
+        filter_size=5,
+        num_filters=50,
+        num_channel=20,
+        pool_size=2,
+        pool_stride=2,
+        act=paddle.activation.Tanh())
+    # The first fully-connected layer
+    fc1 = paddle.layer.fc(input=conv_pool_2,
+                          size=128,
+                          act=paddle.activation.Tanh())
+    # The softmax layer, note that the hidden size should be 10,
+    # which is the number of unique digits
+    predict = paddle.layer.fc(input=fc1,
+                              size=10,
+                              act=paddle.activation.Softmax())
+    return predict
 
 
 def main():
@@ -19,42 +63,74 @@
         name='pixel', type=paddle.data_type.dense_vector(784))
     label = paddle.layer.data(
         name='label', type=paddle.data_type.integer_value(10))
-    hidden1 = paddle.layer.fc(input=images, size=200)
-    hidden2 = paddle.layer.fc(input=hidden1, size=200)
-    inference = paddle.layer.fc(input=hidden2,
-                                size=10,
-                                act=paddle.activation.Softmax())
-    cost = paddle.layer.classification_cost(input=inference, label=label)
 
-    parameters = paddle.parameters.create(cost)
-    for param_name in parameters.keys():
-        array = parameters.get(param_name)
-        array[:] = numpy.random.uniform(low=-1.0, high=1.0, size=array.shape)
-        parameters.set(parameter_name=param_name, value=array)
+    # Here we can build the prediction network in different ways. Please
+    # choose one by uncommenting the corresponding line.
+    predict = softmax_regression(images)
+    #predict = multilayer_perceptron(images)
+    #predict = convolutional_neural_network(images)
+
+    cost = paddle.layer.classification_cost(input=predict, label=label)
+
+    try:
+        with gzip.open('params.tar.gz', 'r') as f:
+            parameters = paddle.parameters.Parameters.from_tar(f)
+    except IOError:
+        parameters = paddle.parameters.create(cost)
+
+    optimizer = paddle.optimizer.Momentum(
+        learning_rate=0.1 / 128.0,
+        momentum=0.9,
+        regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128))
+
+    trainer = paddle.trainer.SGD(cost=cost,
+                                 parameters=parameters,
+                                 update_equation=optimizer)
 
-    adam_optimizer = paddle.optimizer.Adam(learning_rate=0.01)
+    lists = []
 
     def event_handler(event):
         if isinstance(event, paddle.event.EndIteration):
-            para = parameters.get('___fc_2__.w0')
-            print "Pass %d, Batch %d, Cost %f, Weight Mean Of Fc 2 is %f" % (
-                event.pass_id, event.batch_id, event.cost, para.mean())
-
-        else:
-            pass
-
-    trainer = paddle.trainer.SGD(update_equation=adam_optimizer)
-
-    trainer.train(train_data_reader=train_reader,
-                  topology=cost,
-                  parameters=parameters,
-                  event_handler=event_handler,
-                  batch_size=32,  # batch size should be refactor in Data reader
-                  data_types={  # data_types will be removed, It should be in
-                      # network topology
-                      'pixel': images.type,
-                      'label': label.type
-                  })
+            if event.batch_id % 1000 == 0:
+                print "Pass %d, Batch %d, Cost %f, %s" % (
+                    event.pass_id, event.batch_id, event.cost, event.metrics)
+
+                with gzip.open('params.tar.gz', 'w') as f:
+                    parameters.to_tar(f)
+
+        elif isinstance(event, paddle.event.EndPass):
+            result = trainer.test(reader=paddle.batch(
+                paddle.dataset.mnist.test(), batch_size=128))
+            print "Test with Pass %d, Cost %f, %s\n" % (
+                event.pass_id, result.cost, result.metrics)
+            lists.append((event.pass_id, result.cost,
+                          result.metrics['classification_error_evaluator']))
+
+    trainer.train(
+        reader=paddle.batch(
+            paddle.reader.shuffle(
+                paddle.dataset.mnist.train(), buf_size=8192),
+            batch_size=128),
+        event_handler=event_handler,
+        num_passes=100)
+
+    # find the best pass
+    best = sorted(lists, key=lambda list: float(list[1]))[0]
+    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
+    print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100)
+
+    test_creator = paddle.dataset.mnist.test()
+    test_data = []
+    for item in test_creator():
+        test_data.append((item[0], ))
+        if len(test_data) == 100:
+            break
+
+    # output is a softmax layer. It returns probabilities.
+    # Shape should be (100, 10)
+    probs = paddle.infer(
+        output_layer=predict, parameters=parameters, input=test_data)
+    print probs.shape
 
 
 if __name__ == '__main__':
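Because `event_handler` above writes `params.tar.gz` every 1000 batches, a later run (or a separate script) can restore the parameters and run inference without retraining. A minimal sketch reusing only calls that appear in the script above; it assumes the same `predict` topology has already been constructed:

```python
import gzip
import itertools

import paddle.v2 as paddle

# `predict` is assumed to come from softmax_regression(images) etc. above
with gzip.open('params.tar.gz', 'r') as f:
    parameters = paddle.parameters.Parameters.from_tar(f)

# take a few test images; each input tuple carries only the pixels
test_data = [(item[0], )
             for item in itertools.islice(paddle.dataset.mnist.test()(), 8)]

probs = paddle.infer(output_layer=predict, parameters=parameters, input=test_data)
print probs.argmax(axis=1)  # predicted digit per image
```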
- res[name] = output[name].mean(0) + res[name] = output[name]['value'].mean(0) return res diff --git a/demo/recommendation/api_train_v2.py b/demo/recommendation/api_train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a061799e3ac50236a68beedaf700dd6c698a05 --- /dev/null +++ b/demo/recommendation/api_train_v2.py @@ -0,0 +1,125 @@ +import paddle.v2 as paddle +import cPickle +import copy + + +def main(): + paddle.init(use_gpu=False) + movie_title_dict = paddle.dataset.movielens.get_movie_title_dict() + uid = paddle.layer.data( + name='user_id', + type=paddle.data_type.integer_value( + paddle.dataset.movielens.max_user_id() + 1)) + usr_emb = paddle.layer.embedding(input=uid, size=32) + + usr_gender_id = paddle.layer.data( + name='gender_id', type=paddle.data_type.integer_value(2)) + usr_gender_emb = paddle.layer.embedding(input=usr_gender_id, size=16) + + usr_age_id = paddle.layer.data( + name='age_id', + type=paddle.data_type.integer_value( + len(paddle.dataset.movielens.age_table))) + usr_age_emb = paddle.layer.embedding(input=usr_age_id, size=16) + + usr_job_id = paddle.layer.data( + name='job_id', + type=paddle.data_type.integer_value(paddle.dataset.movielens.max_job_id( + ) + 1)) + + usr_job_emb = paddle.layer.embedding(input=usr_job_id, size=16) + + usr_combined_features = paddle.layer.fc( + input=[usr_emb, usr_gender_emb, usr_age_emb, usr_job_emb], + size=200, + act=paddle.activation.Tanh()) + + mov_id = paddle.layer.data( + name='movie_id', + type=paddle.data_type.integer_value( + paddle.dataset.movielens.max_movie_id() + 1)) + mov_emb = paddle.layer.embedding(input=mov_id, size=32) + + mov_categories = paddle.layer.data( + name='category_id', + type=paddle.data_type.sparse_binary_vector( + len(paddle.dataset.movielens.movie_categories()))) + + mov_categories_hidden = paddle.layer.fc(input=mov_categories, size=32) + + mov_title_id = paddle.layer.data( + name='movie_title', + type=paddle.data_type.integer_value_sequence(len(movie_title_dict))) + mov_title_emb = paddle.layer.embedding(input=mov_title_id, size=32) + mov_title_conv = paddle.networks.sequence_conv_pool( + input=mov_title_emb, hidden_size=32, context_len=3) + + mov_combined_features = paddle.layer.fc( + input=[mov_emb, mov_categories_hidden, mov_title_conv], + size=200, + act=paddle.activation.Tanh()) + + inference = paddle.layer.cos_sim( + a=usr_combined_features, b=mov_combined_features, size=1, scale=5) + cost = paddle.layer.mse_cost( + input=inference, + label=paddle.layer.data( + name='score', type=paddle.data_type.dense_vector(1))) + + parameters = paddle.parameters.create(cost) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=paddle.optimizer.Adam( + learning_rate=1e-4)) + feeding = { + 'user_id': 0, + 'gender_id': 1, + 'age_id': 2, + 'job_id': 3, + 'movie_id': 4, + 'category_id': 5, + 'movie_title': 6, + 'score': 7 + } + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d Batch %d Cost %.2f" % ( + event.pass_id, event.batch_id, event.cost) + + trainer.train( + reader=paddle.batch( + paddle.reader.shuffle( + paddle.dataset.movielens.train(), buf_size=8192), + batch_size=256), + event_handler=event_handler, + feeding=feeding, + num_passes=1) + + user_id = 234 + movie_id = 345 + + user = paddle.dataset.movielens.user_info()[user_id] + movie = paddle.dataset.movielens.movie_info()[movie_id] + + feature = user.value() + movie.value() + + def reader(): + yield feature + + 
infer_dict = copy.copy(feeding) + del infer_dict['score'] + + prediction = paddle.infer( + output=inference, + parameters=parameters, + reader=paddle.batch( + reader, batch_size=32), + feeding=infer_dict) + print(prediction + 5) / 2 + + +if __name__ == '__main__': + main() diff --git a/demo/recommendation/trainer_config.py b/demo/recommendation/trainer_config.py index aabcd335253faf69c940024ac8098a54da030463..25f529d7d7c430f179107fb189ade34760ab309d 100755 --- a/demo/recommendation/trainer_config.py +++ b/demo/recommendation/trainer_config.py @@ -86,10 +86,7 @@ movie_feature = construct_feature("movie") user_feature = construct_feature("user") similarity = cos_sim(a=movie_feature, b=user_feature) if not is_predict: - outputs( - regression_cost( - input=similarity, label=data_layer( - 'rating', size=1))) + outputs(mse_cost(input=similarity, label=data_layer('rating', size=1))) define_py_data_sources2( 'data/train.list', diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..036cad4b0a32357bb42580ef577a1eba558be8fe --- /dev/null +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -0,0 +1,190 @@ +import sys +import math +import numpy as np +import paddle.v2 as paddle +import paddle.v2.dataset.conll05 as conll05 + + +def db_lstm(): + word_dict, verb_dict, label_dict = conll05.get_dict() + word_dict_len = len(word_dict) + label_dict_len = len(label_dict) + pred_len = len(verb_dict) + + mark_dict_len = 2 + word_dim = 32 + mark_dim = 5 + hidden_dim = 512 + depth = 8 + + #8 features + def d_type(size): + return paddle.data_type.integer_value_sequence(size) + + word = paddle.layer.data(name='word_data', type=d_type(word_dict_len)) + predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len)) + + ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len)) + ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len)) + ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len)) + ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len)) + ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len)) + mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len)) + + target = paddle.layer.data(name='target', type=d_type(label_dict_len)) + + default_std = 1 / math.sqrt(hidden_dim) / 3.0 + + emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.) + std_0 = paddle.attr.Param(initial_std=0.) 
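+ # emb_para freezes the word embedding (zero initial std, zero learning
+ # rate); its values are loaded from a pretrained file in main() below.
+ # std_0 zero-initializes a parameter, while std_default (next line) draws
+ # from the scaled normal defined by default_std above.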
+ std_default = paddle.attr.Param(initial_std=default_std) + + predicate_embedding = paddle.layer.embedding( + size=word_dim, + input=predicate, + param_attr=paddle.attr.Param( + name='vemb', initial_std=default_std)) + mark_embedding = paddle.layer.embedding( + size=mark_dim, input=mark, param_attr=std_0) + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + paddle.layer.embedding( + size=word_dim, input=x, param_attr=emb_para) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0 = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=emb, param_attr=std_default) for emb in emb_layers + ]) + + mix_hidden_lr = 1e-3 + lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0) + hidden_para_attr = paddle.attr.Param( + initial_std=default_std, learning_rate=mix_hidden_lr) + + lstm_0 = paddle.layer.lstmemory( + input=hidden_0, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + bias_attr=std_0, + param_attr=lstm_para_attr) + + #stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ]) + + lstm = paddle.layer.lstmemory( + input=mix_hidden, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + reverse=((i % 2) == 1), + bias_attr=std_0, + param_attr=lstm_para_attr) + + input_tmp = [mix_hidden, lstm] + + feature_out = paddle.layer.mixed( + size=label_dict_len, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ], ) + + crf_cost = paddle.layer.crf(size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param( + name='crfw', + initial_std=default_std, + learning_rate=mix_hidden_lr)) + + crf_dec = paddle.layer.crf_decoding( + name='crf_dec_l', + size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param(name='crfw')) + + return crf_cost, crf_dec + + +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header. 
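+ # after the 16-byte header, the file holds h*w float32 values that are
+ # reshaped into the (h, w) parameter matrix below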
+ return np.fromfile(f, dtype=np.float32).reshape(h, w) + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + + # define network topology + crf_cost, crf_dec = db_lstm() + + # create parameters + parameters = paddle.parameters.create([crf_cost, crf_dec]) + + # create optimizer + optimizer = paddle.optimizer.Momentum( + momentum=0, + learning_rate=2e-2, + regularization=paddle.optimizer.L2Regularization(rate=8e-4), + model_average=paddle.optimizer.ModelAverage( + average_window=0.5, max_average_window=10000), ) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + + trainer = paddle.trainer.SGD(cost=crf_cost, + parameters=parameters, + update_equation=optimizer) + parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32)) + + trn_reader = paddle.batch( + paddle.reader.shuffle( + conll05.test(), buf_size=8192), batch_size=10) + + feeding = { + 'word_data': 0, + 'ctx_n2_data': 1, + 'ctx_n1_data': 2, + 'ctx_0_data': 3, + 'ctx_p1_data': 4, + 'ctx_p2_data': 5, + 'verb_data': 6, + 'mark_data': 7, + 'target': 8 + } + + trainer.train( + reader=trn_reader, + event_handler=event_handler, + num_passes=10000, + feeding=feeding) + + +if __name__ == '__main__': + main() diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..1c856556bd0cb32f60eba322469b3621c37e1349 --- /dev/null +++ b/demo/sentiment/train_v2.py @@ -0,0 +1,159 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle.v2 as paddle + + +def convolution_net(input_dim, class_dim=2, emb_dim=128, hid_dim=128): + data = paddle.layer.data("word", + paddle.data_type.integer_value_sequence(input_dim)) + emb = paddle.layer.embedding(input=data, size=emb_dim) + conv_3 = paddle.networks.sequence_conv_pool( + input=emb, context_len=3, hidden_size=hid_dim) + conv_4 = paddle.networks.sequence_conv_pool( + input=emb, context_len=4, hidden_size=hid_dim) + output = paddle.layer.fc(input=[conv_3, conv_4], + size=class_dim, + act=paddle.activation.Softmax()) + lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) + cost = paddle.layer.classification_cost(input=output, label=lbl) + return cost + + +def stacked_lstm_net(input_dim, + class_dim=2, + emb_dim=128, + hid_dim=512, + stacked_num=3): + """ + A wrapper for the sentiment classification task. + This network uses a bidirectional recurrent network + consisting of three LSTM layers. This configuration is based on + the paper at the following URL, but uses fewer layers. + http://www.aclweb.org/anthology/P15-1109 + + input_dim: dimension of the word dictionary. + class_dim: number of categories. + emb_dim: dimension of the word embedding. + hid_dim: dimension of the hidden layer. + stacked_num: number of stacked LSTM-hidden layers.
+ """ + assert stacked_num % 2 == 1 + + layer_attr = paddle.attr.Extra(drop_rate=0.5) + fc_para_attr = paddle.attr.Param(learning_rate=1e-3) + lstm_para_attr = paddle.attr.Param(initial_std=0., learning_rate=1.) + para_attr = [fc_para_attr, lstm_para_attr] + bias_attr = paddle.attr.Param(initial_std=0., l2_rate=0.) + relu = paddle.activation.Relu() + linear = paddle.activation.Linear() + + data = paddle.layer.data("word", + paddle.data_type.integer_value_sequence(input_dim)) + emb = paddle.layer.embedding(input=data, size=emb_dim) + + fc1 = paddle.layer.fc(input=emb, + size=hid_dim, + act=linear, + bias_attr=bias_attr) + lstm1 = paddle.layer.lstmemory( + input=fc1, act=relu, bias_attr=bias_attr, layer_attr=layer_attr) + + inputs = [fc1, lstm1] + for i in range(2, stacked_num + 1): + fc = paddle.layer.fc(input=inputs, + size=hid_dim, + act=linear, + param_attr=para_attr, + bias_attr=bias_attr) + lstm = paddle.layer.lstmemory( + input=fc, + reverse=(i % 2) == 0, + act=relu, + bias_attr=bias_attr, + layer_attr=layer_attr) + inputs = [fc, lstm] + + fc_last = paddle.layer.pooling( + input=inputs[0], pooling_type=paddle.pooling.Max()) + lstm_last = paddle.layer.pooling( + input=inputs[1], pooling_type=paddle.pooling.Max()) + output = paddle.layer.fc(input=[fc_last, lstm_last], + size=class_dim, + act=paddle.activation.Softmax(), + bias_attr=bias_attr, + param_attr=para_attr) + + lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) + cost = paddle.layer.classification_cost(input=output, label=lbl) + return cost + + +if __name__ == '__main__': + # init + paddle.init(use_gpu=False) + + #data + print 'load dictionary...' + word_dict = paddle.dataset.imdb.word_dict() + dict_dim = len(word_dict) + class_dim = 2 + train_reader = paddle.batch( + paddle.reader.shuffle( + lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000), + batch_size=100) + test_reader = paddle.batch( + lambda: paddle.dataset.imdb.test(word_dict), batch_size=100) + + feeding = {'word': 0, 'label': 1} + + # network config + # Please choose the way to build the network + # by uncommenting the corresponding line. 
+ cost = convolution_net(dict_dim, class_dim=class_dim) + # cost = stacked_lstm_net(dict_dim, class_dim=class_dim, stacked_num=3) + + # create parameters + parameters = paddle.parameters.create(cost) + + # create optimizer + adam_optimizer = paddle.optimizer.Adam( + learning_rate=2e-3, + regularization=paddle.optimizer.L2Regularization(rate=8e-4), + model_average=paddle.optimizer.ModelAverage(average_window=0.5)) + + # End batch and end pass event handler + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "\nPass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + else: + sys.stdout.write('.') + sys.stdout.flush() + if isinstance(event, paddle.event.EndPass): + result = trainer.test(reader=test_reader, feeding=feeding) + print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) + + # create trainer + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=adam_optimizer) + + trainer.train( + reader=train_reader, + event_handler=event_handler, + feeding=feeding, + num_passes=2) diff --git a/demo/seqToseq/api_train_v2.py b/demo/seqToseq/api_train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..3072c375123a2713c655b09fb28001960c9ab64d --- /dev/null +++ b/demo/seqToseq/api_train_v2.py @@ -0,0 +1,214 @@ +import sys + +import paddle.v2 as paddle + + +def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): + ### Network Architecture + word_vector_dim = 512 # dimension of word vector + decoder_size = 512 # dimension of hidden unit in GRU Decoder network + encoder_size = 512 # dimension of hidden unit in GRU Encoder network + + beam_size = 3 + max_length = 250 + + #### Encoder + src_word_id = paddle.layer.data( + name='source_language_word', + type=paddle.data_type.integer_value_sequence(source_dict_dim)) + src_embedding = paddle.layer.embedding( + input=src_word_id, + size=word_vector_dim, + param_attr=paddle.attr.ParamAttr(name='_source_language_embedding')) + src_forward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size) + src_backward = paddle.networks.simple_gru( + input=src_embedding, size=encoder_size, reverse=True) + encoded_vector = paddle.layer.concat(input=[src_forward, src_backward]) + + #### Decoder + with paddle.layer.mixed(size=decoder_size) as encoded_proj: + encoded_proj += paddle.layer.full_matrix_projection( + input=encoded_vector) + + backward_first = paddle.layer.first_seq(input=src_backward) + + with paddle.layer.mixed( + size=decoder_size, act=paddle.activation.Tanh()) as decoder_boot: + decoder_boot += paddle.layer.full_matrix_projection( + input=backward_first) + + def gru_decoder_with_attention(enc_vec, enc_proj, current_word): + + decoder_mem = paddle.layer.memory( + name='gru_decoder', size=decoder_size, boot_layer=decoder_boot) + + context = paddle.networks.simple_attention( + encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem) + + with paddle.layer.mixed(size=decoder_size * 3) as decoder_inputs: + decoder_inputs += paddle.layer.full_matrix_projection(input=context) + decoder_inputs += paddle.layer.full_matrix_projection( + input=current_word) + + gru_step = paddle.layer.gru_step( + name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) + + with paddle.layer.mixed( + size=target_dict_dim, + bias_attr=True, + act=paddle.activation.Softmax()) as out: + out += 
paddle.layer.full_matrix_projection(input=gru_step) + return out + + decoder_group_name = "decoder_group" + group_input1 = paddle.layer.StaticInputV2(input=encoded_vector, is_seq=True) + group_input2 = paddle.layer.StaticInputV2(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] + + if not is_generating: + trg_embedding = paddle.layer.embedding( + input=paddle.layer.data( + name='target_language_word', + type=paddle.data_type.integer_value_sequence(target_dict_dim)), + size=word_vector_dim, + param_attr=paddle.attr.ParamAttr(name='_target_language_embedding')) + group_inputs.append(trg_embedding) + + # For a decoder equipped with an attention mechanism, during training + # the target embedding (the ground truth) is the data input, + # while the encoded source sequence is accessed as an unbounded memory. + # Here, the StaticInput defines a read-only memory + # for the recurrent_group. + decoder = paddle.layer.recurrent_group( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) + + lbl = paddle.layer.data( + name='target_language_next_word', + type=paddle.data_type.integer_value_sequence(target_dict_dim)) + cost = paddle.layer.classification_cost(input=decoder, label=lbl) + + return cost + else: + # In generation, the decoder predicts the next target word based on + # the encoded source sequence and the last generated target word. + + # The encoded source sequence (encoder's output) must be specified by + # StaticInput, which is a read-only memory. + # The embedding of the last generated word is automatically retrieved + # by GeneratedInput, which is initialized by a start mark, such as <s>, + # and must be included in generation. + + trg_embedding = paddle.layer.GeneratedInputV2( + size=target_dict_dim, + embedding_name='_target_language_embedding', + embedding_size=word_vector_dim) + group_inputs.append(trg_embedding) + + beam_gen = paddle.layer.beam_search( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs, + bos_id=0, + eos_id=1, + beam_size=beam_size, + max_length=max_length) + + return beam_gen + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + is_generating = False + + # source and target dict dim.
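+ # the demo uses vocabularies of the same size (30000) for both languages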
+ dict_size = 30000 + source_dict_dim = target_dict_dim = dict_size + + # train the network + if not is_generating: + cost = seqToseq_net(source_dict_dim, target_dict_dim) + parameters = paddle.parameters.create(cost) + + # define optimize method and trainer + optimizer = paddle.optimizer.Adam( + learning_rate=5e-5, + regularization=paddle.optimizer.L2Regularization(rate=8e-4)) + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer) + # define data reader + wmt14_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=8192), + batch_size=5) + + # define event_handler callback + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 10 == 0: + print "\nPass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, + event.metrics) + else: + sys.stdout.write('.') + sys.stdout.flush() + + # start to train + trainer.train( + reader=wmt14_reader, event_handler=event_handler, num_passes=2) + + # translate English sequences into French + else: + # use the first 3 samples for generation + gen_creator = paddle.dataset.wmt14.gen(dict_size) + gen_data = [] + gen_num = 3 + for item in gen_creator(): + gen_data.append((item[0], )) + if len(gen_data) == gen_num: + break + + beam_gen = seqToseq_net(source_dict_dim, target_dict_dim, is_generating) + # get the pretrained model, whose BLEU = 26.92 + parameters = paddle.dataset.wmt14.model() + # prob is the prediction probabilities, and id is the predicted word. + beam_result = paddle.infer( + output_layer=beam_gen, + parameters=parameters, + input=gen_data, + field=['prob', 'id']) + + # get the dictionary + src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) + + # the delimiter element of generated sequences is -1, and + # the first element of each generated sequence is the sequence length + seq_list = [] + seq = [] + for w in beam_result[1]: + if w != -1: + seq.append(w) + else: + seq_list.append(' '.join([trg_dict.get(w) for w in seq[1:]])) + seq = [] + + prob = beam_result[0] + beam_size = 3 + for i in xrange(gen_num): + print "\n*******************************************************\n" + print "src:", ' '.join( + [src_dict.get(w) for w in gen_data[i][0]]), "\n" + for j in xrange(beam_size): + print "prob = %f:" % (prob[i][j]), seq_list[i * beam_size + j] + + +if __name__ == '__main__': + main() diff --git a/demo/word2vec/train_v2.py b/demo/word2vec/train_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..7d952b446f9db432062fc3305a6b65b0ad66dd47 --- /dev/null +++ b/demo/word2vec/train_v2.py @@ -0,0 +1,80 @@ +import math + +import paddle.v2 as paddle + +dictsize = 1953 +embsize = 32 +hiddensize = 256 +N = 5 + + +def wordemb(inlayer): + wordemb = paddle.layer.table_projection( + input=inlayer, + size=embsize, + param_attr=paddle.attr.Param( + name="_proj", + initial_std=0.001, + learning_rate=1, + l2_rate=0, )) + return wordemb + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + firstword = paddle.layer.data( + name="firstw", type=paddle.data_type.integer_value(dict_size)) + secondword = paddle.layer.data( + name="secondw", type=paddle.data_type.integer_value(dict_size)) + thirdword = paddle.layer.data( + name="thirdw", type=paddle.data_type.integer_value(dict_size)) + fourthword = paddle.layer.data( + name="fourthw", type=paddle.data_type.integer_value(dict_size)) + nextword =
paddle.layer.data( + name="fifthw", type=paddle.data_type.integer_value(dict_size)) + + Efirst = wordemb(firstword) + Esecond = wordemb(secondword) + Ethird = wordemb(thirdword) + Efourth = wordemb(fourthword) + + contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth]) + hidden1 = paddle.layer.fc(input=contextemb, + size=hiddensize, + act=paddle.activation.Sigmoid(), + layer_attr=paddle.attr.Extra(drop_rate=0.5), + bias_attr=paddle.attr.Param(learning_rate=2), + param_attr=paddle.attr.Param( + initial_std=1. / math.sqrt(embsize * 8), + learning_rate=1)) + predictword = paddle.layer.fc(input=hidden1, + size=dict_size, + bias_attr=paddle.attr.Param(learning_rate=2), + act=paddle.activation.Softmax()) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + result = trainer.test( + paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), 32)) + print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics, + result.metrics) + + cost = paddle.layer.classification_cost(input=predictword, label=nextword) + parameters = paddle.parameters.create(cost) + adam_optimizer = paddle.optimizer.Adam( + learning_rate=3e-3, + regularization=paddle.optimizer.L2Regularization(8e-4)) + trainer = paddle.trainer.SGD(cost, parameters, adam_optimizer) + trainer.train( + paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32), + num_passes=30, + event_handler=event_handler) + + +if __name__ == '__main__': + main() diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst index 3718cd73a2003b8ef6c406a9bd51dc68e76402dc..9be0b370ee5e301aee4a6e31b1cfa905754968e8 100644 --- a/doc/api/index_cn.rst +++ b/doc/api/index_cn.rst @@ -1,37 +1,9 @@ -API中文手册 -============ - -DataProvider API ----------------- - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider_cn.rst - data_provider/pydataprovider2_cn.rst - -.. _api_trainer_config: - -Model Config API ----------------- - -.. toctree:: - :maxdepth: 1 - - trainer_config_helpers/optimizers.rst - trainer_config_helpers/data_sources.rst - trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst - trainer_config_helpers/poolings.rst - trainer_config_helpers/networks.rst - trainer_config_helpers/evaluators.rst - trainer_config_helpers/attrs.rst - - -Applications API ----------------- +API +=== .. toctree:: :maxdepth: 1 - predict/swig_py_paddle_cn.rst + 模型配置 + 数据访问 + 训练与应用 diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 10c297a71d6988c002de868e804ed9ee2345fbd7..25c1dd00b9cbb3ab647e04cdc2b4c27c552a2332 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -1,37 +1,9 @@ API === -DataProvider API ----------------- - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider_en.rst - data_provider/pydataprovider2_en.rst - -.. _api_trainer_config: - -Model Config API ----------------- - -.. toctree:: - :maxdepth: 1 - - trainer_config_helpers/optimizers.rst - trainer_config_helpers/data_sources.rst - trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst - trainer_config_helpers/poolings.rst - trainer_config_helpers/networks.rst - trainer_config_helpers/evaluators.rst - trainer_config_helpers/attrs.rst - - -Applications API ----------------- - .. 
toctree:: :maxdepth: 1 - predict/swig_py_paddle_en.rst + v2/model_configs.rst + v2/data.rst + v2/run_logic.rst diff --git a/doc/api/data_provider/dataprovider_cn.rst b/doc/api/v1/data_provider/dataprovider_cn.rst similarity index 100% rename from doc/api/data_provider/dataprovider_cn.rst rename to doc/api/v1/data_provider/dataprovider_cn.rst diff --git a/doc/api/data_provider/dataprovider_en.rst b/doc/api/v1/data_provider/dataprovider_en.rst similarity index 100% rename from doc/api/data_provider/dataprovider_en.rst rename to doc/api/v1/data_provider/dataprovider_en.rst diff --git a/doc/api/data_provider/pydataprovider2_cn.rst b/doc/api/v1/data_provider/pydataprovider2_cn.rst similarity index 100% rename from doc/api/data_provider/pydataprovider2_cn.rst rename to doc/api/v1/data_provider/pydataprovider2_cn.rst diff --git a/doc/api/data_provider/pydataprovider2_en.rst b/doc/api/v1/data_provider/pydataprovider2_en.rst similarity index 100% rename from doc/api/data_provider/pydataprovider2_en.rst rename to doc/api/v1/data_provider/pydataprovider2_en.rst diff --git a/doc/api/data_provider/src/mnist_config.py b/doc/api/v1/data_provider/src/mnist_config.py similarity index 100% rename from doc/api/data_provider/src/mnist_config.py rename to doc/api/v1/data_provider/src/mnist_config.py diff --git a/doc/api/data_provider/src/mnist_provider.dict.py b/doc/api/v1/data_provider/src/mnist_provider.dict.py similarity index 100% rename from doc/api/data_provider/src/mnist_provider.dict.py rename to doc/api/v1/data_provider/src/mnist_provider.dict.py diff --git a/doc/api/data_provider/src/mnist_train.txt b/doc/api/v1/data_provider/src/mnist_train.txt similarity index 100% rename from doc/api/data_provider/src/mnist_train.txt rename to doc/api/v1/data_provider/src/mnist_train.txt diff --git a/doc/api/data_provider/src/sentimental_config.py b/doc/api/v1/data_provider/src/sentimental_config.py similarity index 100% rename from doc/api/data_provider/src/sentimental_config.py rename to doc/api/v1/data_provider/src/sentimental_config.py diff --git a/doc/api/data_provider/src/sentimental_provider.py b/doc/api/v1/data_provider/src/sentimental_provider.py similarity index 100% rename from doc/api/data_provider/src/sentimental_provider.py rename to doc/api/v1/data_provider/src/sentimental_provider.py diff --git a/doc/api/data_provider/src/sentimental_train.txt b/doc/api/v1/data_provider/src/sentimental_train.txt similarity index 100% rename from doc/api/data_provider/src/sentimental_train.txt rename to doc/api/v1/data_provider/src/sentimental_train.txt diff --git a/doc/api/data_provider/src/train.list b/doc/api/v1/data_provider/src/train.list similarity index 100% rename from doc/api/data_provider/src/train.list rename to doc/api/v1/data_provider/src/train.list diff --git a/doc/api/v1/index_cn.rst b/doc/api/v1/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..3718cd73a2003b8ef6c406a9bd51dc68e76402dc --- /dev/null +++ b/doc/api/v1/index_cn.rst @@ -0,0 +1,37 @@ +API中文手册 +============ + +DataProvider API +---------------- + +.. toctree:: + :maxdepth: 1 + + data_provider/dataprovider_cn.rst + data_provider/pydataprovider2_cn.rst + +.. _api_trainer_config: + +Model Config API +---------------- + +.. 
toctree:: + :maxdepth: 1 + + trainer_config_helpers/optimizers.rst + trainer_config_helpers/data_sources.rst + trainer_config_helpers/layers.rst + trainer_config_helpers/activations.rst + trainer_config_helpers/poolings.rst + trainer_config_helpers/networks.rst + trainer_config_helpers/evaluators.rst + trainer_config_helpers/attrs.rst + + +Applications API +---------------- + +.. toctree:: + :maxdepth: 1 + + predict/swig_py_paddle_cn.rst diff --git a/doc/api/v1/index_en.rst b/doc/api/v1/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..10c297a71d6988c002de868e804ed9ee2345fbd7 --- /dev/null +++ b/doc/api/v1/index_en.rst @@ -0,0 +1,37 @@ +API +=== + +DataProvider API +---------------- + +.. toctree:: + :maxdepth: 1 + + data_provider/dataprovider_en.rst + data_provider/pydataprovider2_en.rst + +.. _api_trainer_config: + +Model Config API +---------------- + +.. toctree:: + :maxdepth: 1 + + trainer_config_helpers/optimizers.rst + trainer_config_helpers/data_sources.rst + trainer_config_helpers/layers.rst + trainer_config_helpers/activations.rst + trainer_config_helpers/poolings.rst + trainer_config_helpers/networks.rst + trainer_config_helpers/evaluators.rst + trainer_config_helpers/attrs.rst + + +Applications API +---------------- + +.. toctree:: + :maxdepth: 1 + + predict/swig_py_paddle_en.rst diff --git a/doc/api/predict/src/predict_sample.py b/doc/api/v1/predict/src/predict_sample.py similarity index 100% rename from doc/api/predict/src/predict_sample.py rename to doc/api/v1/predict/src/predict_sample.py diff --git a/doc/api/predict/swig_py_paddle_cn.rst b/doc/api/v1/predict/swig_py_paddle_cn.rst similarity index 100% rename from doc/api/predict/swig_py_paddle_cn.rst rename to doc/api/v1/predict/swig_py_paddle_cn.rst diff --git a/doc/api/predict/swig_py_paddle_en.rst b/doc/api/v1/predict/swig_py_paddle_en.rst similarity index 100% rename from doc/api/predict/swig_py_paddle_en.rst rename to doc/api/v1/predict/swig_py_paddle_en.rst diff --git a/doc/api/trainer_config_helpers/activations.rst b/doc/api/v1/trainer_config_helpers/activations.rst similarity index 100% rename from doc/api/trainer_config_helpers/activations.rst rename to doc/api/v1/trainer_config_helpers/activations.rst diff --git a/doc/api/trainer_config_helpers/attrs.rst b/doc/api/v1/trainer_config_helpers/attrs.rst similarity index 100% rename from doc/api/trainer_config_helpers/attrs.rst rename to doc/api/v1/trainer_config_helpers/attrs.rst diff --git a/doc/api/trainer_config_helpers/data_sources.rst b/doc/api/v1/trainer_config_helpers/data_sources.rst similarity index 100% rename from doc/api/trainer_config_helpers/data_sources.rst rename to doc/api/v1/trainer_config_helpers/data_sources.rst diff --git a/doc/api/trainer_config_helpers/evaluators.rst b/doc/api/v1/trainer_config_helpers/evaluators.rst similarity index 100% rename from doc/api/trainer_config_helpers/evaluators.rst rename to doc/api/v1/trainer_config_helpers/evaluators.rst diff --git a/doc/api/trainer_config_helpers/layers.rst b/doc/api/v1/trainer_config_helpers/layers.rst similarity index 96% rename from doc/api/trainer_config_helpers/layers.rst rename to doc/api/v1/trainer_config_helpers/layers.rst index 2793d6afd9565eb461c8657b838b146fe1992b20..24389c2d8574dfda4bec9298776aa6b1aee51535 100644 --- a/doc/api/trainer_config_helpers/layers.rst +++ b/doc/api/v1/trainer_config_helpers/layers.rst @@ -139,24 +139,12 @@ lstmemory :members: lstmemory :noindex: -lstm_step_layer ---------------- -.. 
automodule:: paddle.trainer_config_helpers.layers - :members: lstm_step_layer - :noindex: - grumemory --------- .. automodule:: paddle.trainer_config_helpers.layers :members: grumemory :noindex: -gru_step_layer ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: gru_step_layer - :noindex: - Recurrent Layer Group ===================== @@ -172,6 +160,18 @@ recurrent_group :members: recurrent_group :noindex: +lstm_step_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lstm_step_layer + :noindex: + +gru_step_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: gru_step_layer + :noindex: + beam_search ------------ .. automodule:: paddle.trainer_config_helpers.layers @@ -308,6 +308,12 @@ repeat_layer :members: repeat_layer :noindex: +rotate_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: rotate_layer + :noindex: + seq_reshape_layer ----------------- .. automodule:: paddle.trainer_config_helpers.layers @@ -426,6 +432,12 @@ multi_binary_label_cross_entropy :members: multi_binary_label_cross_entropy :noindex: +mse_cost +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: mse_cost + :noindex: + huber_cost ---------- .. automodule:: paddle.trainer_config_helpers.layers @@ -444,6 +456,12 @@ rank_cost :members: rank_cost :noindex: +sum_cost +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sum_cost + :noindex: + crf_layer ----------------- .. automodule:: paddle.trainer_config_helpers.layers @@ -462,6 +480,12 @@ ctc_layer :members: ctc_layer :noindex: +warp_ctc_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: warp_ctc_layer + :noindex: + nce_layer ----------- .. automodule:: paddle.trainer_config_helpers.layers @@ -474,12 +498,6 @@ hsigmoid :members: hsigmoid :noindex: -sum_cost ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: sum_cost - :noindex: - Check Layer ============ diff --git a/doc/api/trainer_config_helpers/networks.rst b/doc/api/v1/trainer_config_helpers/networks.rst similarity index 100% rename from doc/api/trainer_config_helpers/networks.rst rename to doc/api/v1/trainer_config_helpers/networks.rst diff --git a/doc/api/trainer_config_helpers/optimizers.rst b/doc/api/v1/trainer_config_helpers/optimizers.rst similarity index 100% rename from doc/api/trainer_config_helpers/optimizers.rst rename to doc/api/v1/trainer_config_helpers/optimizers.rst diff --git a/doc/api/trainer_config_helpers/poolings.rst b/doc/api/v1/trainer_config_helpers/poolings.rst similarity index 100% rename from doc/api/trainer_config_helpers/poolings.rst rename to doc/api/v1/trainer_config_helpers/poolings.rst diff --git a/doc/api/v2/config/activation.rst b/doc/api/v2/config/activation.rst new file mode 100644 index 0000000000000000000000000000000000000000..eca3ce03bcdc599edca802d8dfca48d4f28275a2 --- /dev/null +++ b/doc/api/v2/config/activation.rst @@ -0,0 +1,101 @@ +=========== +Activation +=========== + +Abs +=== + +.. automodule:: paddle.v2.activation + :members: Abs + :noindex: + +Exp +=== + +.. automodule:: paddle.v2.activation + :members: Exp + :noindex: + +Identity +======== + +.. automodule:: paddle.v2.activation + :members: Identity + :noindex: + +Linear +====== + +.. automodule:: paddle.v2.activation + :members: Linear + :noindex: + +Log +=== + +.. automodule:: paddle.v2.activation + :members: Log + :noindex: + +Square +====== + +.. 
automodule:: paddle.v2.activation + :members: Square + :noindex: + +Sigmoid +======= + +.. automodule:: paddle.v2.activation + :members: Sigmoid + :noindex: + +Softmax +======= + +.. automodule:: paddle.v2.activation + :members: Softmax + :noindex: + +SequenceSoftmax +=============== + +.. automodule:: paddle.v2.activation + :members: SequenceSoftmax + :noindex: + +Relu +==== + +.. automodule:: paddle.v2.activation + :members: Relu + :noindex: + +BRelu +===== + +.. automodule:: paddle.v2.activation + :members: BRelu + :noindex: + +SoftRelu +======== + +.. automodule:: paddle.v2.activation + :members: SoftRelu + :noindex: + +Tanh +==== + +.. automodule:: paddle.v2.activation + :members: Tanh + :noindex: + +STanh +===== + +.. automodule:: paddle.v2.activation + :members: STanh + :noindex: diff --git a/doc/api/v2/config/attr.rst b/doc/api/v2/config/attr.rst new file mode 100644 index 0000000000000000000000000000000000000000..a93f41b86779200d8bac651614f4d61f4895875f --- /dev/null +++ b/doc/api/v2/config/attr.rst @@ -0,0 +1,6 @@ +Parameter Attribute +=================== + +.. automodule:: paddle.v2.attr + :members: + :noindex: diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst new file mode 100644 index 0000000000000000000000000000000000000000..05817ec85455ac58566e90956a54cb86541f8488 --- /dev/null +++ b/doc/api/v2/config/layer.rst @@ -0,0 +1,493 @@ +.. _api_v2.layer: + +====== +Layers +====== + +Data layer +=========== + +.. _api_v2.layer_data: + +data +---- +.. automodule:: paddle.v2.layer + :members: data + :noindex: + +Fully Connected Layers +====================== + +.. _api_v2.layer_fc: + +fc +-- +.. automodule:: paddle.v2.layer + :members: fc + :noindex: + +selective_fc +------------ +.. automodule:: paddle.v2.layer + :members: selective_fc + :noindex: + +Conv Layers +=========== + +conv_operator +------------- +.. automodule:: paddle.v2.layer + :members: conv_operator + :noindex: + +conv_projection +--------------- +.. automodule:: paddle.v2.layer + :members: conv_projection + :noindex: + +conv_shift +---------- +.. automodule:: paddle.v2.layer + :members: conv_shift + :noindex: + +img_conv +-------- +.. automodule:: paddle.v2.layer + :members: img_conv + :noindex: + +.. _api_v2.layer_context_projection: + +context_projection +------------------ +.. automodule:: paddle.v2.layer + :members: context_projection + :noindex: + +Image Pooling Layer +=================== + +img_pool +-------- +.. automodule:: paddle.v2.layer + :members: img_pool + :noindex: + +spp +--- +.. automodule:: paddle.v2.layer + :members: spp + :noindex: + +maxout +------ +.. automodule:: paddle.v2.layer + :members: maxout + :noindex: + +Norm Layer +========== + +img_cmrnorm +----------- +.. automodule:: paddle.v2.layer + :members: img_cmrnorm + :noindex: + +batch_norm +---------- +.. automodule:: paddle.v2.layer + :members: batch_norm + :noindex: + +sum_to_one_norm +--------------- +.. automodule:: paddle.v2.layer + :members: sum_to_one_norm + :noindex: + +cross_channel_norm +------------------ +.. automodule:: paddle.v2.layer + :members: cross_channel_norm + :noindex: + +Recurrent Layers +================ + +recurrent +--------- +.. automodule:: paddle.v2.layer + :members: recurrent + :noindex: + +lstmemory +--------- +.. automodule:: paddle.v2.layer + :members: lstmemory + :noindex: + +grumemory +--------- +.. automodule:: paddle.v2.layer + :members: grumemory + :noindex: + +Recurrent Layer Group +===================== + +memory +------ +.. 
automodule:: paddle.v2.layer + :members: memory + :noindex: + +recurrent_group +--------------- +.. automodule:: paddle.v2.layer + :members: recurrent_group + :noindex: + +lstm_step +--------- +.. automodule:: paddle.v2.layer + :members: lstm_step + :noindex: + +gru_step +-------- +.. automodule:: paddle.v2.layer + :members: gru_step + :noindex: + +beam_search +------------ +.. automodule:: paddle.v2.layer + :members: beam_search + :noindex: + +get_output +---------- +.. automodule:: paddle.v2.layer + :members: get_output + :noindex: + +Mixed Layer +=========== + +.. _api_v2.layer_mixed: + +mixed +----- +.. automodule:: paddle.v2.layer + :members: mixed + :noindex: + +.. _api_v2.layer_embedding: + +embedding +--------- +.. automodule:: paddle.v2.layer + :members: embedding + :noindex: + +scaling_projection +------------------ +.. automodule:: paddle.v2.layer + :members: scaling_projection + :noindex: + +dotmul_projection +----------------- +.. automodule:: paddle.v2.layer + :members: dotmul_projection + :noindex: + +dotmul_operator +--------------- +.. automodule:: paddle.v2.layer + :members: dotmul_operator + :noindex: + +full_matrix_projection +---------------------- +.. automodule:: paddle.v2.layer + :members: full_matrix_projection + :noindex: + +identity_projection +------------------- +.. automodule:: paddle.v2.layer + :members: identity_projection + :noindex: + + +table_projection +---------------- +.. automodule:: paddle.v2.layer + :members: table_projection + :noindex: + +trans_full_matrix_projection +---------------------------- +.. automodule:: paddle.v2.layer + :members: trans_full_matrix_projection + :noindex: + +Aggregate Layers +================ + +.. _api_v2.layer_pooling: + +pooling +------- +.. automodule:: paddle.v2.layer + :members: pooling + :noindex: + +.. _api_v2.layer_last_seq: + +last_seq +-------- +.. automodule:: paddle.v2.layer + :members: last_seq + :noindex: + +.. _api_v2.layer_first_seq: + +first_seq +--------- +.. automodule:: paddle.v2.layer + :members: first_seq + :noindex: + +concat +------ +.. automodule:: paddle.v2.layer + :members: concat + :noindex: + +seq_concat +---------- +.. automodule:: paddle.v2.layer + :members: seq_concat + :noindex: + +Reshaping Layers +================ + +block_expand +------------ +.. automodule:: paddle.v2.layer + :members: block_expand + :noindex: + +.. _api_v2.layer_expand: + +expand +------ +.. automodule:: paddle.v2.layer + :members: expand + :noindex: + +repeat +------ +.. automodule:: paddle.v2.layer + :members: repeat + :noindex: + +rotate +------ +.. automodule:: paddle.v2.layer + :members: rotate + :noindex: + +seq_reshape +----------- +.. automodule:: paddle.v2.layer + :members: seq_reshape + :noindex: + +Math Layers +=========== + +addto +----- +.. automodule:: paddle.v2.layer + :members: addto + :noindex: + +linear_comb +----------- +.. automodule:: paddle.v2.layer + :members: linear_comb + :noindex: + +interpolation +------------- +.. automodule:: paddle.v2.layer + :members: interpolation + :noindex: + +bilinear_interp +--------------- +.. automodule:: paddle.v2.layer + :members: bilinear_interp + :noindex: + +power +----- +.. automodule:: paddle.v2.layer + :members: power + :noindex: + +scaling +------- +.. automodule:: paddle.v2.layer + :members: scaling + :noindex: + +slope_intercept +--------------- +.. automodule:: paddle.v2.layer + :members: slope_intercept + :noindex: + +tensor +------ +.. automodule:: paddle.v2.layer + :members: tensor + :noindex: + +.. _api_v2.layer_cos_sim: + +cos_sim +------- +.. 
automodule:: paddle.v2.layer + :members: cos_sim + :noindex: + +trans +----- +.. automodule:: paddle.v2.layer + :members: trans + :noindex: + +Sampling Layers +=============== + +maxid +----- +.. automodule:: paddle.v2.layer + :members: maxid + :noindex: + +sampling_id +----------- +.. automodule:: paddle.v2.layer + :members: sampling_id + :noindex: + +Slicing and Joining Layers +========================== + +pad +---- +.. automodule:: paddle.v2.layer + :members: pad + :noindex: + +.. _api_v2.layer_costs: + +Cost Layers +=========== + +cross_entropy_cost +------------------ +.. automodule:: paddle.v2.layer + :members: cross_entropy_cost + :noindex: + +cross_entropy_with_selfnorm_cost +-------------------------------- +.. automodule:: paddle.v2.layer + :members: cross_entropy_with_selfnorm_cost + :noindex: + +multi_binary_label_cross_entropy_cost +------------------------------------- +.. automodule:: paddle.v2.layer + :members: multi_binary_label_cross_entropy_cost + :noindex: + +huber_cost +---------- +.. automodule:: paddle.v2.layer + :members: huber_cost + :noindex: + +lambda_cost +----------- +.. automodule:: paddle.v2.layer + :members: lambda_cost + :noindex: + +rank_cost +--------- +.. automodule:: paddle.v2.layer + :members: rank_cost + :noindex: + +sum_cost +--------- +.. automodule:: paddle.v2.layer + :members: sum_cost + :noindex: + +crf +--- +.. automodule:: paddle.v2.layer + :members: crf + :noindex: + +crf_decoding +------------ +.. automodule:: paddle.v2.layer + :members: crf_decoding + :noindex: + +ctc +--- +.. automodule:: paddle.v2.layer + :members: ctc + :noindex: + +warp_ctc +-------- +.. automodule:: paddle.v2.layer + :members: warp_ctc + :noindex: + +nce +--- +.. automodule:: paddle.v2.layer + :members: nce + :noindex: + +hsigmoid +--------- +.. automodule:: paddle.v2.layer + :members: hsigmoid + :noindex: + +Check Layer +============ + +eos +--- +.. automodule:: paddle.v2.layer + :members: eos + :noindex: diff --git a/doc/api/v2/config/networks.rst b/doc/api/v2/config/networks.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f209bc95bec7279051118bb857a96515e0371a9 --- /dev/null +++ b/doc/api/v2/config/networks.rst @@ -0,0 +1,117 @@ +======== +Networks +======== + +The v2.networks module contains pieces of neural network that combine multiple layers. + +NLP +=== + +sequence_conv_pool +------------------ +.. automodule:: paddle.v2.networks + :members: sequence_conv_pool + :noindex: + +.. _api_trainer_config_helpers_network_text_conv_pool: + +text_conv_pool +-------------- +.. automodule:: paddle.v2.networks + :members: text_conv_pool + :noindex: + +Images +====== + +img_conv_bn_pool +---------------- +.. automodule:: paddle.v2.networks + :members: img_conv_bn_pool + :noindex: + +img_conv_group +-------------- +.. automodule:: paddle.v2.networks + :members: img_conv_group + :noindex: + +.. _api_trainer_config_helpers_network_simple_img_conv_pool: + +simple_img_conv_pool +-------------------- +.. automodule:: paddle.v2.networks + :members: simple_img_conv_pool + :noindex: + +vgg_16_network +--------------- +.. automodule:: paddle.v2.networks + :members: vgg_16_network + :noindex: + +Recurrent +========= + +LSTM +---- + +lstmemory_unit +`````````````` +.. automodule:: paddle.v2.networks + :members: lstmemory_unit + :noindex: + +lstmemory_group +``````````````` +.. automodule:: paddle.v2.networks + :members: lstmemory_group + :noindex: + +simple_lstm +``````````` +.. 
automodule:: paddle.v2.networks + :members: simple_lstm + :noindex: + +bidirectional_lstm +`````````````````` +.. automodule:: paddle.v2.networks + :members: bidirectional_lstm + :noindex: + +GRU +--- + +gru_unit +```````` +.. automodule:: paddle.v2.networks + :members: gru_unit + :noindex: + +gru_group +````````` +.. automodule:: paddle.v2.networks + :members: gru_group + :noindex: + +simple_gru +`````````` +.. automodule:: paddle.v2.networks + :members: simple_gru + :noindex: + +simple_attention +---------------- +.. automodule:: paddle.v2.networks + :members: simple_attention + :noindex: + +Miscs +===== + +dropout_layer +-------------- +.. automodule:: paddle.v2.networks + :members: dropout_layer + :noindex: diff --git a/doc/api/v2/config/optimizer.rst b/doc/api/v2/config/optimizer.rst new file mode 100644 index 0000000000000000000000000000000000000000..b32373fdef52a7aa9d64b12cda3f76cb2abf351b --- /dev/null +++ b/doc/api/v2/config/optimizer.rst @@ -0,0 +1,45 @@ +========== +Optimizer +========== + +Momentum +======== +.. automodule:: paddle.v2.optimizer + :members: Momentum + :noindex: + +Adam +==== +.. automodule:: paddle.v2.optimizer + :members: Adam + :noindex: + +Adamax +====== +.. automodule:: paddle.v2.optimizer + :members: Adamax + :noindex: + +AdaGrad +======= +.. automodule:: paddle.v2.optimizer + :members: AdaGrad + :noindex: + +DecayedAdaGrad +============== +.. automodule:: paddle.v2.optimizer + :members: DecayedAdaGrad + :noindex: + +AdaDelta +======== +.. automodule:: paddle.v2.optimizer + :members: AdaDelta + :noindex: + +RMSProp +======= +.. automodule:: paddle.v2.optimizer + :members: RMSProp + :noindex: diff --git a/doc/api/v2/config/pooling.rst b/doc/api/v2/config/pooling.rst new file mode 100644 index 0000000000000000000000000000000000000000..d26b365c9284632210a1532853e39feedc70758b --- /dev/null +++ b/doc/api/v2/config/pooling.rst @@ -0,0 +1,46 @@ +======= +Pooling +======= + +BasePool +======== +.. automodule:: paddle.v2.pooling + :members: BasePool + :noindex: + +Avg +=== +.. automodule:: paddle.v2.pooling + :members: Avg + :noindex: + +Max +=== +.. automodule:: paddle.v2.pooling + :members: Max + :noindex: + +Sum +=== +.. automodule:: paddle.v2.pooling + :members: Sum + :noindex: + +SquareRootN +=========== +.. automodule:: paddle.v2.pooling + :members: SquareRootN + :noindex: + +CudnnAvg +======== +.. automodule:: paddle.v2.pooling + :members: CudnnAvg + :noindex: + +CudnnMax +======== +.. automodule:: paddle.v2.pooling + :members: CudnnMax + :noindex: + diff --git a/doc/api/v2/data.rst b/doc/api/v2/data.rst new file mode 100644 index 0000000000000000000000000000000000000000..fef87c4fbdb452771ecdb361c6eeae5b32bcee14 --- /dev/null +++ b/doc/api/v2/data.rst @@ -0,0 +1,113 @@ +================================== +Data Reader Interface and DataSets +================================== + + +DataTypes +========= + +.. automodule:: paddle.v2.data_type + :members: + :noindex: + +DataFeeder +========== + +.. automodule:: paddle.v2.data_feeder + :members: + :noindex: + +Reader +====== + +.. automodule:: paddle.v2.reader + :members: + :noindex: + +.. automodule:: paddle.v2.reader.creator + :members: + :noindex: + +minibatch +========= + +.. automodule:: paddle.v2.minibatch + :members: + :noindex: + +Dataset +======= + +.. automodule:: paddle.v2.dataset + :members: + :noindex: + +mnist ++++++ + +.. automodule:: paddle.v2.dataset.mnist + :members: + :noindex: + +cifar ++++++ + +.. automodule:: paddle.v2.dataset.cifar + :members: + :noindex: + +conll05 ++++++++ + +.. 
automodule:: paddle.v2.dataset.conll05 + :members: get_dict,get_embedding,test + :noindex: + +imdb +++++ + +.. automodule:: paddle.v2.dataset.imdb + :members: + :noindex: + +imikolov +++++++++ + +.. automodule:: paddle.v2.dataset.imikolov + :members: + :noindex: + +movielens ++++++++++ + +.. automodule:: paddle.v2.dataset.movielens + :members: + :noindex: + +.. autoclass:: paddle.v2.dataset.movielens.MovieInfo + :noindex: + +.. autoclass:: paddle.v2.dataset.movielens.UserInfo + :noindex: + +sentiment ++++++++++ + +.. automodule:: paddle.v2.dataset.sentiment + :members: + :noindex: + +uci_housing ++++++++++++ + +.. automodule:: paddle.v2.dataset.uci_housing + :members: + :noindex: + +wmt14 ++++++ + +.. automodule:: paddle.v2.dataset.wmt14 + :members: + :noindex: + diff --git a/doc/api/v2/model_configs.rst b/doc/api/v2/model_configs.rst new file mode 100644 index 0000000000000000000000000000000000000000..a5fae7e29e56d9e236d489353a5c8967d1954641 --- /dev/null +++ b/doc/api/v2/model_configs.rst @@ -0,0 +1,12 @@ +Model Configuration +=================== + +.. toctree:: + :maxdepth: 1 + + config/activation.rst + config/layer.rst + config/optimizer.rst + config/pooling.rst + config/networks.rst + config/attr.rst diff --git a/doc/api/v2/run_logic.rst b/doc/api/v2/run_logic.rst new file mode 100644 index 0000000000000000000000000000000000000000..5c97651f6536d89d2b5926d4b2907a547aa86b55 --- /dev/null +++ b/doc/api/v2/run_logic.rst @@ -0,0 +1,31 @@ +====================== +Training and Inference +====================== + +Parameters +========== + +.. automodule:: paddle.v2.parameters + :members: Parameters + :noindex: + +Trainer +======= + +.. automodule:: paddle.v2.trainer + :members: SGD + :noindex: + +Event +===== + +.. automodule:: paddle.v2.event + :members: + :noindex: + +Inference +========= + +.. autofunction:: paddle.v2.infer + :noindex: + \ No newline at end of file diff --git a/doc/design/dist/README.md b/doc/design/dist/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1788208bcabca30f66cb1c80e80f6b824c0d9579 --- /dev/null +++ b/doc/design/dist/README.md @@ -0,0 +1,172 @@ +# Design Doc: Distributed Training + +## Objective + +In [these slides](https://www.slideshare.net/cxwangyi/paddlepaddle-a-complete-solution-for-businesses), we explained that we'd like PaddlePaddle running on general-purpose clusters like those managed by Kubernetes, so as to address demands for AI from both Internet and non-Internet industries. + +This poses technical challenges to PaddlePaddle: + +1. Support fault-recovery. +1. Support both offline and online training. +1. [Serverless computing](https://en.wikipedia.org/wiki/Serverless_computing) of distributed training. + + +## Training Job + +A training job will be created once a user asks Paddle cloud to train a model. The training job is made up of different processes that collaboratively consume data and produce a trained model. There are three kinds of processes: + +1. the *master process*, which dispatches tasks to +1. one or more *trainer processes*, which run distributed training and synchronize gradients/models via +1. one or more *parameter server processes*, where each holds a shard of the global model. + +Their relation is illustrated in the following graph: + +<img src="src/paddle-model-sharding.png"/> + +### Master Process + +The master process will: + +- Partition a dataset into [tasks](#task) and dispatch tasks to trainers. +- Keep track of training progress on the dataset with a [task queue](#task-queue).
A training job iterates over the dataset one full pass at a time before moving on to the next pass. + + +#### Task + +A task is a data shard to be trained. The total number of tasks will be much bigger than the total number of trainers. The number of data instances inside a task will be much bigger than the mini-batch size. + +#### Task Queue + +The master process has three task queues to track training progress. As illustrated in the graph below, Job A and Job B both have one master process. Each master process has three task queues. + +<img src="src/paddle-task-queues.png"/> + +- The todo queue holds tasks to be dispatched. When a job starts, the master process fills the todo queue with all tasks. +- The pending queue holds tasks that are currently being trained by trainers. +- The done queue holds tasks that are already trained. + +The life cycle of a single task is illustrated below: + +<img src="src/paddle-task-states.png"/> + +1. When a new pass of training starts, all tasks will be placed in the todo queue. +1. The master process will dispatch a few tasks to each trainer at a time, put them in the pending queue, and wait for completion. +1. The trainer will work on its tasks and tell the master process once a task is completed. The master process will dispatch a new task to that trainer. +1. If a task times out, the master process will move it back to the todo queue and increase its timeout count by one. If the timeout count is above a threshold, the task is likely to cause a trainer to crash, so it will be discarded. +1. The master process will move each completed task to the done queue. When the todo queue is empty, the master process will start a new pass by moving all tasks in the done queue back to the todo queue and resetting the timeout counter of every task to zero. + +### Trainer Process + +The trainer process will: + +- Receive tasks from the master. +- Work on the tasks: calculate and upload gradients to the parameter servers, and update the local model by downloading new parameters from the parameter servers. + +### Parameter Server Process + +Parameter server processes hold the parameters collaboratively. The parameters are partitioned across the parameter servers. + +The parameter server will: + +- Receive gradients from the trainers, update its parameters, and give the trainers the latest parameters. +- Periodically save its parameters to the distributed file system, overwriting the previous save. + +### Optimization Algorithms + +The communication pattern between the trainers and the parameter servers depends on the category of optimization algorithm: + +- Synchronous Stochastic Gradient Descent (sync-SGD) + + The parameter servers wait for all trainers to finish the n-th mini-batch and send their gradients before broadcasting new parameters to every trainer. Every trainer waits for the new parameters before starting the (n+1)-th mini-batch. + +- Asynchronous Stochastic Gradient Descent (async-SGD) + + There is no synchronization between different trainers; a parameter server updates its parameters as soon as it receives a new gradient: + + - Each trainer uploads its accumulated gradient every n mini-batches. + - Every m mini-batches, the trainer downloads new parameters from the parameter servers. + - n and m do not have to be equal. + +## Fault Tolerance + +The training job will pause if the master process is dead, or if any of the parameter server processes is dead. They will be restarted by [Kubernetes](https://kubernetes.io/) and recover in a few minutes. Please refer to [fault recovery](#fault-recovery).
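To make this concrete, below is a minimal sketch of a trainer's inner loop under async-SGD, as described in the Optimization Algorithms section above. It is illustrative only: `ps_client` (with `upload_gradient`/`download_parameters`) and `compute_gradient` are hypothetical stand-ins for the trainer's real RPC client and backward pass, and `n` and `m` have the meanings defined above.

```python
# Illustrative sketch only: ps_client and compute_gradient are hypothetical
# stand-ins, not an existing PaddlePaddle API.
def async_sgd_loop(ps_client, mini_batches, compute_gradient, n, m):
    accumulated = None  # gradient accumulated over the last n mini-batches
    for step, batch in enumerate(mini_batches, start=1):
        grad = compute_gradient(batch)  # forward + backward on one mini-batch
        accumulated = grad if accumulated is None else accumulated + grad
        if step % n == 0:  # upload the accumulated gradient every n batches
            ps_client.upload_gradient(accumulated)
            accumulated = None
        if step % m == 0:  # download fresh parameters every m batches;
            ps_client.download_parameters()  # n and m need not be equal
```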
+ +The training job will continue to make progress if there is at least one training process running. The strategy depends on the type of optimization algorithm: + +- sync-SGD + + TODO + +- async-SGD + + Since async-SGD does not require synchronization between mini-batches, the system will by definition make progress if at least one trainer is running. + +## Fault Recovery + +PaddlePaddle uses [etcd](https://github.com/coreos/etcd) to keep track of the states of processes. Because etcd is a distributed, reliable key-value store, a restarted process can recover its states from etcd. The model parameters are periodically saved into the distributed file system, so a restarted parameter server can recover its parameters from the saved file. + +Now we will introduce how each process recovers from a failure. The graph below shows how etcd is used: + + + +### Master Process + +When the master is started by Kubernetes, it executes the following steps at startup: + +1. Grabs a unique *master* lock in etcd, which prevents concurrent master instantiations. +1. Recovers the task queues from etcd if they already exist; otherwise, the master will create them. +1. Watches the trainer prefix keys `/trainer/` on etcd to find the live trainers. +1. Starts dispatching the tasks to the trainers, and updates the task queues using an etcd transaction to ensure the lock is held during the update. + +The master process will kill itself if its etcd lease expires. + +When the master process is dead for any reason, Kubernetes will restart it. It will be online again with all states recovered from etcd in a few minutes. + +### Trainer Process + +When the trainer is started by Kubernetes, it executes the following steps at startup: + +1. Watches the available parameter server prefix keys `/ps/` on etcd and waits until the count of parameter servers reaches the desired count. +1. Generates a unique ID, and sets the key `/trainer/` with its contact address as the value. The key will be deleted when the lease expires, so the master will be aware of the trainer going online and offline. +1. Waits for tasks from the master to start training. + +If the trainer's etcd lease expires, it will try to set the key `/trainer/` again so that the master process can discover the trainer again. + +### Parameter Server Process + +When the parameter server is started by Kubernetes, it executes the following steps at startup: + +1. Reads the desired total number of parameter servers from the etcd key `/ps_desired`. +1. Searches through the etcd keys `/ps/` (`/ps/0`, `/ps/1`, ...) to find the first non-existent key whose index is smaller than the total number of parameter servers, and sets the key using a transaction to avoid concurrent writes. The parameter server's index is inferred from the key name. + + The desired number of parameter servers is 3: + + + + The third parameter server joined: + + + +1. The parameter server can load parameters if there are already saved parameters in the save path (inferred from its index). +1. Now the parameter server is ready for the trainers' requests. + +If the parameter server's etcd lease expires, the parameter server will kill itself. + + +## Dynamic Scaling + +### Trainer Scaling + +TODO + +### Parameter Server Scaling + +Not planned for v1.
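+
+To make the task life cycle described above concrete, here is a minimal,
+illustrative Python sketch of the master's three queues. All names (`Task`,
+`Master`, `MAX_TIMEOUT`) are hypothetical and not PaddlePaddle API; a real
+master would also persist these queues to etcd as described in
+[fault recovery](#fault-recovery):
+
+```python
+import collections
+
+MAX_TIMEOUT = 3  # assumed threshold before a task is discarded
+
+class Task(object):
+    def __init__(self, shard):
+        self.shard = shard  # the data shard this task covers
+        self.timeout_count = 0
+
+class Master(object):
+    def __init__(self, tasks):
+        self.todo = collections.deque(tasks)
+        self.pending = {}  # id(task) -> task, currently held by trainers
+        self.done = []
+
+    def dispatch(self):
+        """Move one task from todo to pending and hand it to a trainer."""
+        task = self.todo.popleft()
+        self.pending[id(task)] = task
+        return task
+
+    def on_task_completed(self, task):
+        self.done.append(self.pending.pop(id(task)))
+        if not self.todo and not self.pending:
+            self.start_new_pass()
+
+    def on_task_timeout(self, task):
+        task.timeout_count += 1
+        del self.pending[id(task)]
+        if task.timeout_count <= MAX_TIMEOUT:
+            self.todo.append(task)  # retry within this pass
+        # else: the task likely crashes trainers, so it is discarded
+
+    def start_new_pass(self):
+        """Recycle done tasks into todo and reset timeout counters."""
+        for task in self.done:
+            task.timeout_count = 0
+        self.todo.extend(self.done)
+        self.done = []
+```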
+ +## Training Dataset Format + +TODO + +## User Interface + +TODO diff --git a/doc/design/dist/src/paddle-etcd.graffle b/doc/design/dist/src/paddle-etcd.graffle new file mode 100644 index 0000000000000000000000000000000000000000..56681ae5bbe11849116d621b066a6317e003e4ca Binary files /dev/null and b/doc/design/dist/src/paddle-etcd.graffle differ diff --git a/doc/design/dist/src/paddle-etcd.png b/doc/design/dist/src/paddle-etcd.png new file mode 100644 index 0000000000000000000000000000000000000000..4f9c9762b3a8c089dd5e9b2c07cb9dfc78296a21 Binary files /dev/null and b/doc/design/dist/src/paddle-etcd.png differ diff --git a/doc/design/dist/src/paddle-model-sharding.graffle b/doc/design/dist/src/paddle-model-sharding.graffle new file mode 100644 index 0000000000000000000000000000000000000000..fba30f0ca2b47f0d202a432821d95e55aac37ec8 Binary files /dev/null and b/doc/design/dist/src/paddle-model-sharding.graffle differ diff --git a/doc/design/dist/src/paddle-model-sharding.png b/doc/design/dist/src/paddle-model-sharding.png new file mode 100644 index 0000000000000000000000000000000000000000..8c3f6724ef46c6527e63a4cd8cb0b50fe0167124 Binary files /dev/null and b/doc/design/dist/src/paddle-model-sharding.png differ diff --git a/doc/design/dist/src/paddle-ps-0.png b/doc/design/dist/src/paddle-ps-0.png new file mode 100644 index 0000000000000000000000000000000000000000..47ef32806f182cab003da77f1556823b3f6d1721 Binary files /dev/null and b/doc/design/dist/src/paddle-ps-0.png differ diff --git a/doc/design/dist/src/paddle-ps-1.png b/doc/design/dist/src/paddle-ps-1.png new file mode 100644 index 0000000000000000000000000000000000000000..f3125db73096c52bac6e7c60e1675552857c0774 Binary files /dev/null and b/doc/design/dist/src/paddle-ps-1.png differ diff --git a/doc/design/dist/src/paddle-ps.graffle b/doc/design/dist/src/paddle-ps.graffle new file mode 100644 index 0000000000000000000000000000000000000000..0e536ffdd91cd696008b4c01bad3cb53edebdc16 Binary files /dev/null and b/doc/design/dist/src/paddle-ps.graffle differ diff --git a/doc/design/dist/src/paddle-task-queues.graffle b/doc/design/dist/src/paddle-task-queues.graffle new file mode 100644 index 0000000000000000000000000000000000000000..4263ed8bfd2ef0e55058828bf23f2fac3595e5fd Binary files /dev/null and b/doc/design/dist/src/paddle-task-queues.graffle differ diff --git a/doc/design/dist/src/paddle-task-queues.png b/doc/design/dist/src/paddle-task-queues.png new file mode 100644 index 0000000000000000000000000000000000000000..5f980266795776752cebd0c346b85c4a75a47780 Binary files /dev/null and b/doc/design/dist/src/paddle-task-queues.png differ diff --git a/doc/design/dist/src/paddle-task-states.graffle b/doc/design/dist/src/paddle-task-states.graffle new file mode 100644 index 0000000000000000000000000000000000000000..cf1a0b9246d9386a949d2dbb8c32fe84f72eea83 Binary files /dev/null and b/doc/design/dist/src/paddle-task-states.graffle differ diff --git a/doc/design/dist/src/paddle-task-states.png b/doc/design/dist/src/paddle-task-states.png new file mode 100644 index 0000000000000000000000000000000000000000..4ae43cb66c071aee9eb90d875e2373b29af9c3e0 Binary files /dev/null and b/doc/design/dist/src/paddle-task-states.png differ diff --git a/doc/design/multi_language_interface/why_plain_c.md b/doc/design/multi_language_interface/why_plain_c.md new file mode 100644 index 0000000000000000000000000000000000000000..a3f41ca7b93de8a55d927c88812802ef12246182 --- /dev/null +++ b/doc/design/multi_language_interface/why_plain_c.md @@ -0,0 +1,118 @@ +# 
Paddle多语言接口实现 +## 背景 + +Paddle需要一个多语言接口,这个接口需要做到: + +* 有标准的,良好的文档 + * 例如Python可以使用[Sphinx](http://www.sphinx-doc.org/en/stable/)生成API文档,golang可以使用[GoDoc](https://godoc.org/golang.org/x/tools/cmd/godoc)生成文档。这都需要这个接口按照约定俗成的规则来注释完备。 +* 不同语言的接口适应不同语言的特性 + * 例如Java与Python的错误处理是直接扔出来Exception,而对于golang错误处理应该使用返回值。 + +## 基本要求 + +Paddle的多语言接口实现包括以下几个方面: + +* 我们使用动态库来分发Paddle。在这个动态库中不嵌入任何其他语言的解释器,也不使用其他动态库。 +* 这个动态库使用C99标准的头文件导出一些函数,不使用/导出C++符号。 +* 不导出Paddle内部的结构体、类,仅仅使用`void*`指针作为类型的句柄(handler)。 +* 不使用SWIG这种代码生成器,而是手写多语言绑定。 + + +## 原因 + +### 使用动态库来分发Paddle + +* Paddle的链接方式比较复杂 + * 如果用户要把Paddle的静态库(libpaddle.a)链接到自己的程序里,得使用 `--whole-archive` (for GCC) 或者 `--force_load` (for Clang) 参数,来确保把 libpaddle.a 里所有的符号都写入自己的程序的二进制文件里。这是因为 Paddle 的源码里使用了[object factory design pattern](http://stackoverflow.com/a/1310326/724872)。 +* 编译型语言,例如C/C++使用静态库和动态库难度差不多。但是解释性语言,例如[Python](http://stackoverflow.com/questions/19560594/how-to-import-static-library-in-python)或者[Java](http://stackoverflow.com/questions/24493337/linking-static-library-with-jni),只能调用Paddle的动态库,否则得把Paddle静态库链接到解释器里。 + * 解释性语言实际运行的二进制是解释器本身,如果调用静态库只能将静态库与解释器链接。例如对于Java来说,便是将静态库加入JVM中。这对于通常的Java的开发者来说,是不常见的做法。 + +### 动态库中不嵌入任何其他语言的解释器 + +* 目前Paddle的进程模型是C++内部驱动Python解释器进行模型配置解析和数据读取 +* 我们最终的动态库中不嵌入Python或者其他任何语言的解释器。模型配置解析,数据读取均交由其他语言完成 + +现阶段Paddle有一个问题是,Paddle内嵌的Python解释器和外部使用的Python如果版本不同,会直接报错退出。 + +### Paddle动态库中,不引用其他动态库 + +* 即这个动态库是不依赖于其他任何文件的,可以在任何机器上执行的。 + +### 这个动态库使用C99标准的头文件导出一些函数,不使用/导出C++符号 + +* 由于C++编译器没有[名字修饰](https://en.wikipedia.org/wiki/Name_mangling#C.2B.2B)的规范,不同版本的编译器之间,对于同一段C++代码生成的符号可能不一致。而多语言接口需要直接读取生成的二进制(动态库),需要有稳定的导出符号。 +* C语言是有导出符号的标准的,并且在常见的平台上都有稳定的ABI调用标准。 +* 大多数语言都支持使用C语言API +* 使用C99而不使用C89,是因为C99支持[Fixed-width integer types](https://en.wikipedia.org/wiki/C_data_types#Fixed-width_integer_types)和[Boolean type](https://en.wikipedia.org/wiki/C_data_types#Boolean_type)。 +* 使用C99而不使用C11的原因是,[C11](https://en.wikipedia.org/wiki/C11_(C_standard_revision))并没有Paddle特别需要的特性,且C99相对于C11使用更加广泛。 + +### 不导出Paddle内部的结构体、类,仅仅使用`void*`指针作为类型的句柄(handler) + +* Paddle内部的类为C++书写,直接导出到C的接口比较困难。 +* 在C-API中使用`void*`来表示Paddle内部类。再在每一个API中自己检查类型。 + +在C的头文件 `paddle_matrix.h` 中: + +```C +typedef void* paddle_matrix; +typedef int paddle_error; + +#ifdef __cplusplus +extern "C" { +#endif + +paddle_error paddle_matrix_shape(paddle_matrix matrix, + uint64_t* width, + uint64_t* height); + +#ifdef __cplusplus +} +#endif +``` +而在CPP里面实现这个C的接口,文件 `paddle_matrix.cpp`: + +```cpp +#include "paddle/math/matrix.hpp" +extern "C" +paddle_error paddle_matrix_shape(paddle_matrix matrix, + uint64_t *width, + uint64_t *height) { + auto m = (paddle::math::Matrix*)(matrix); + *width = m->width(); + *height = m->height(); + return 0;  // 返回0表示成功 +} +``` + +其中`paddle/math/matrix.hpp`文件内容为: + +```cpp +namespace paddle { +namespace math { + +class Matrix { + //...
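+  // 以下两个成员为示意用的假设声明(并非Paddle的真实实现),
+  // 对应上面 paddle_matrix.cpp 中调用的 m->width() 与 m->height():
+  size_t width() const;
+  size_t height() const;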
+}; + +} // namespace math +} // namespace paddle +``` + +### 不使用SWIG这种代码生成器,而是手写多语言绑定 + +* [SWIG](http://www.swig.org/)是一个多语言接口的代码生成器。它的目标是使用C/C++写代码,SWIG直接读取C/C++的头文件,生成各种语言的绑定代码。 + * 对于多语言接口,SWIG需要写一个interface文件。这个文件具有独特的语法,学习成本高。且增加一个第三方语言,就需要对这个第三方语言增加一些定义。有的时候,interface文件的写法非常[tricky](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/api/Paddle.swig#L36)。社区贡献代码学习成本高。 + * SWIG暴露的接口保留了C++的接口样式,很难保证多语言代码风格的一致性。(函数命名,错误处理) + * 因为SWIG在第三方语言中暴露的函数名,类名和C++中完全一致。C++的命名风格并不能适应其他第三方语言。如果使用SWIG,我们需要在interface文件里,将大量的`SomeCppClass`重命名成`some_python_class`,或者`SomeGoTypes`。 + * 对于不同语言,错误处理的方式也不尽相同。例如对于Java或者Python,最常见的错误处理方式是Exception,而对于Golang,错误处理方式是返回值。而SWIG只能简单的暴露C++接口,无法做到对于各种语言错误处理方式的适配。 + * 对于大多数语言,直接使用C语言的.h并不困难。例如Python的[cffi](https://cffi.readthedocs.io/en/latest/overview.html#simple-example-abi-level-in-line)或者[Cython](http://cython.org/), golang的[cgo](https://golang.org/cmd/cgo/)。 + * SWIG支持的语言或者解释器有局限。例如对于Python,使用SWIG只支持CPython解释器,而不支持PyPy解释器。 + + +## 原因列表 + +| 结论 | 对比 | 原因 | |---| --- | --- | | 使用动态库 | 不使用静态库 | 解释型语言只能调用动态库,Paddle静态库链接复杂 | | 不嵌入其他语言解释器 | 不嵌入Python解释器 | Paddle C++目前嵌入Python解释器,会导致不同版本Python在一个进程里的bug | | 不引用其他动态库 | | Paddle一个动态库可以在任何Linux系统上运行 | | 使用C99做接口 | 不使用C++做接口 | C有标准的ABI,C99是目前C最广泛的使用标准,且C99支持bool类型和定长整数(uint64_t等)类型 | | 使用void*作为类句柄 | 不显式地写每个类具体包含什么 | 实现简单,并且让接口脱离实现细节 | | 手写多语言绑定 | 不使用SWIG | 使用SWIG需要多语言绑定的开发人员熟练掌握SWIG配置,社区参与困难。SWIG生成的代码不能保证多语言代码风格的一致性 | + + +## 简单实现 + +TBD diff --git a/doc/design/reader/README.md b/doc/design/reader/README.md index 17d52b9e20b8130688028092421f4b33f44763ac..f21f7af520df5171798326818ecb97c3bcd14a12 100644 --- a/doc/design/reader/README.md +++ b/doc/design/reader/README.md @@ -4,9 +4,10 @@ At training and testing time, PaddlePaddle programs need to read data. To ease t - A *reader* is a function that reads data (from file, network, random number generator, etc) and yields data items. - A *reader creator* is a function that returns a reader function. -- A *reader* decorator is a function, which accepts one or more readers, and returns a reader. +- A *reader decorator* is a function, which accepts one or more readers, and returns a reader. +- A *batch reader* is a function that reads data (from *reader*, file, network, random number generator, etc) and yields a batch of data items. -and provide frequently used reader creators and reader decorators. +and provide a function which converts a reader to a batch reader, as well as frequently used reader creators and reader decorators. ## Data Reader Interface @@ -22,24 +23,69 @@ An example implementation for single item data reader creator: ```python def reader_creator_random_image(width, height): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height) - return reader + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader ``` An example implementation for multiple item data reader creator: ```python -def reader_creator_random_imageand_label(widht, height, label): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height), label - return reader +def reader_creator_random_image_and_label(width, height, label): + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height), label + return reader +``` + +## Batch Reader Interface + +A *batch reader* can be any function with no parameters that returns an iterable (anything that can be used in `for x in iterable`). The output of the iterable should be a batch (list) of data items. Each item inside the list must be a tuple.
+ +Here are valid outputs: +```python +# a mini batch of three data items. Each data item consists of three columns of data, each of which is 1. +[(1, 1, 1), +(2, 2, 2), +(3, 3, 3)] + +# a mini batch of three data items, each data item is a list (single column). +[([1,1,1],), +([2,2,2],), +([3,3,3],)] +``` + +Please note that each item inside the list must be a tuple; below is an invalid output: +```python + # wrong, [1,1,1] needs to be inside a tuple: ([1,1,1],). + # Otherwise it's ambiguous whether [1,1,1] means a single column of data [1, 1, 1], + # or three columns of data, each of which is 1. +[[1,1,1], +[2,2,2], +[3,3,3]] +``` + +It's easy to convert a reader to a batch reader: +```python +mnist_train = paddle.dataset.mnist.train() +mnist_train_batch_reader = paddle.batch(mnist_train, 128) +``` + +It is also easy to create a custom batch reader: +```python +def custom_batch_reader(): + while True: + batch = [] + for i in xrange(128): + batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended. + yield batch + +mnist_random_image_batch_reader = custom_batch_reader ``` ## Usage -data reader, mapping from item(s) read to data layer, batch size and number of total pass will be passed into `paddle.train`: +A batch reader, a mapping from item(s) read to data layer(s), the batch size, and the number of total passes will be passed into `paddle.train`: ```python # two data layer is created: @@ -47,8 +93,8 @@ image_layer = paddle.layer.data("image", ...) label_layer = paddle.layer.data("label", ...) # ... - -paddle.train(paddle.dataset.mnist, {"image":0, "label":1}, 128, 10, ...) +batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128) +paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...) ``` ## Data Reader Decorator @@ -64,7 +110,7 @@ Since reading data may take time and training can not proceed without data. It i Use `paddle.reader.buffered` to prefetch data: ```python -buffered_reader = paddle.reader.buffered(paddle.dataset.mnist, 100) +buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100) ``` `buffered_reader` will try to buffer (prefetch) `100` data entries. @@ -77,24 +123,24 @@ We can do: ```python def reader_creator_random_image(width, height): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height) - return reader + def reader(): + while True: + yield numpy.random.uniform(-1, 1, size=width*height) + return reader def reader_creator_bool(t): - def reader: - while True: - yield t - return reader + def reader(): + while True: + yield t + return reader true_reader = reader_creator_bool(True) false_reader = reader_creator_bool(False) -reader = paddle.reader.compose(paddle.dataset.mnist, data_reader_creator_random_image(20, 20), true_reader, false_reader) -# Skipped 1 because paddle.dataset.mnist produces two items per data entry. +reader = paddle.reader.compose(paddle.dataset.mnist.train(), reader_creator_random_image(20, 20), true_reader, false_reader) +# Skipped 1 because paddle.dataset.mnist.train() produces two items per data entry. # And we don't care second item at this time. -paddle.train(reader, {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) +paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...)
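+# An illustrative breakdown of the five columns the composed reader yields
+# (an assumption inferred from the mapping above): 0 = mnist image,
+# 1 = mnist label (skipped), 2 = random image, 3 = True, 4 = False.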
``` ### Shuffle @@ -103,16 +149,20 @@ Given shuffle buffer size `n`, `paddle.reader.shuffle` will return a data reader Example: ```python -reader = paddle.reader.shuffle(paddle.dataset.mnist, 512) +reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512) ``` ## Q & A -### Why return only a single entry, but not a mini batch? +### Why does a reader return only a single entry, but not a mini batch? + +Always returning a single entry makes reusing existing data readers much easier (e.g., if an existing reader returns not a single entry but 3 entries, training code will be more complex, because it needs to handle cases like a batch size of 2). + +We provide the function `paddle.batch` to turn a (single entry) reader into a batch reader. -If a mini batch is returned, data reader need to take care of batch size. But batch size is a concept for training, it makes more sense for user to specify batch size as a parameter for `train`. +### Why do we need a batch reader? Isn't it sufficient for train to take a reader and batch_size as arguments? -Practically, always return a single entry make reusing existing data readers much easier (e.g., if existing reader return not a single entry but 3 entries, training code will be more complex because it need to handle cases like batch size 2). +In most cases, having train take a reader and a batch_size as arguments would be sufficient. However, sometimes users want to customize the order of data entries inside a mini batch, or even change the batch size dynamically. ### Why use a dictionary but not a list to provide mapping? @@ -122,22 +172,22 @@ We decided to use dictionary (`{"image":0, "label":1}`) instead of list (`["imag ```python def image_reader_creator(image_path, label_path, n): - def reader(): - f = open(image_path) - l = open(label_path) - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - for i in xrange(n): - yield images[i, :], labels[i] # a single entry of data is created each time - f.close() - l.close() - return reader + def reader(): + f = open(image_path) + l = open(label_path) + images = numpy.fromfile( + f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') + images = images / 255.0 * 2.0 - 1.0 + labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") + for i in xrange(n): + yield images[i, :], labels[i] # a single entry of data is created each time + f.close() + l.close() + return reader # images_reader_creator creates a reader reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) -paddle.train(reader, {"image":0, "label":1}, ...) +paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...) ``` ### How is `paddle.train` implemented @@ -145,17 +195,8 @@ paddle.train(reader, {"image":0, "label":1}, ...) An example implementation of paddle.train could be: ```python -def make_minibatch(reader, minibatch_size): - def ret(): - r = reader() - buf = [r.next() for x in xrange(minibatch_size)] - while len(buf) > 0: - yield buf - buf = [r.next() for x in xrange(minibatch_size)] - return ret - -def train(reader, mapping, batch_size, total_pass): - for pass_idx in range(total_pass): - for mini_batch in make_minibatch(reader): # this loop will never end in online learning. - do_forward_backward(mini_batch, mapping) +def train(batch_reader, mapping, batch_size, total_pass): + for pass_idx in range(total_pass): + for mini_batch in batch_reader(): # this loop will never end in online learning.
+ do_forward_backward(mini_batch, mapping) ``` diff --git a/doc/faq/index_cn.rst b/doc/faq/index_cn.rst index 6d5367177da2af6276698f94f86664a5b506dca2..df5e172252277a881480cd2816eb901b711abe6b 100644 --- a/doc/faq/index_cn.rst +++ b/doc/faq/index_cn.rst @@ -286,3 +286,16 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 .. code-block:: bash paddle train --use_gpu=true --trainer_count=2 --gpu_id=2 + + +12. 训练过程中出现 :code:`Floating point exception`,训练因此退出怎么办? +------------------------------------------------------------------------ + +Paddle二进制在运行时捕获了浮点数异常,只要出现浮点数异常(即训练过程中出现NaN或者Inf),就会立刻退出。浮点异常通常的原因是浮点数溢出、除零等问题。 +主要原因包括以下几个方面: + +* 训练过程中参数或者梯度的尺度过大,在参数累加、乘除等运算时导致了浮点数溢出。 +* 模型一直不收敛,发散到了一个数值特别大的地方。 +* 训练数据有问题,导致参数收敛到了一些奇异的情况。或者输入数据尺度过大,有些特征的取值达到数百万,这时进行矩阵乘法运算就可能导致浮点数溢出。 + +主要的解决办法是减小学习率或者对数据进行归一化处理。 diff --git a/doc/getstarted/basic_usage/index_cn.rst b/doc/getstarted/basic_usage/index_cn.rst index d01cdaaeb75ec7d02480eb9162cabaad2a947db9..428f58830e0b10c024f31238b7404c6df193eecd 100644 --- a/doc/getstarted/basic_usage/index_cn.rst +++ b/doc/getstarted/basic_usage/index_cn.rst @@ -55,7 +55,7 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 # 线性计算网络层: ȳ = wx + b ȳ = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) # 计算误差函数,即 ȳ 和真实 y 之间的距离 - cost = regression_cost(input= ȳ, label=y) + cost = mse_cost(input= ȳ, label=y) outputs(cost) @@ -69,7 +69,7 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 - **数据层**:数据层 `data_layer` 是神经网络的入口,它读入数据并将它们传输到接下来的网络层。这里数据层有两个,分别对应于变量 `x` 和 `y`。 - **全连接层**:全连接层 `fc_layer` 是基础的计算单元,这里利用它建模变量之间的线性关系。计算单元是神经网络的核心,PaddlePaddle支持大量的计算单元和任意深度的网络连接,从而可以拟合任意的函数来学习复杂的数据关系。 - - **回归误差代价层**:回归误差代价层 `regression_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 + - **回归误差代价层**:回归误差代价层 `mse_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 定义了网络结构并保存为 `trainer_config.py` 之后,运行以下训练命令: diff --git a/doc/getstarted/basic_usage/index_en.rst b/doc/getstarted/basic_usage/index_en.rst index c10b897d4292d0c2b062b5c8e23466505afa408a..6775da20c2f51000f305b095d40abd27b8fa6c0e 100644 --- a/doc/getstarted/basic_usage/index_en.rst +++ b/doc/getstarted/basic_usage/index_en.rst @@ -49,7 +49,7 @@ To recover this relationship between ``X`` and ``Y``, we use a neural network wi x = data_layer(name='x', size=1) y = data_layer(name='y', size=1) y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) - cost = regression_cost(input=y_predict, label=y) + cost = mse_cost(input=y_predict, label=y) outputs(cost) Some of the most fundamental usages of PaddlePaddle are demonstrated: diff --git a/doc/getstarted/build_and_install/build_from_source_en.md index d9d54bff3096cb3520409971dbd1b2e179ac8be1..69f4501f370dcc9d603ec54a63d68568d66e832e 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.md +++ b/doc/getstarted/build_and_install/build_from_source_en.md @@ -51,7 +51,7 @@ PaddlePaddle supports some build options.
WITH_TIMERCompile PaddlePaddle with stats timer WITH_PROFILERCompile PaddlePaddle with GPU profiler WITH_DOCCompile PaddlePaddle with documentation -ON_COVERALLSCompile PaddlePaddle with code coverage +WITH_COVERAGECompile PaddlePaddle with code coverage COVERALLS_UPLOADPackage code coverage data to coveralls ON_TRAVISExclude special unit test on Travis CI diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 6b132d2a4d31ab85347bd41d0243ffee858ac909..22db1ef658ca35f0ab18895c1da1003bd3cd93fa 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -1,158 +1,173 @@ -安装PaddlePaddle的Docker镜像 -============================ +PaddlePaddle的Docker容器使用方式 +================================ -PaddlePaddle项目提供官方 `Docker `_ 镜像。Docker镜像是我们目前唯一官方支持的部署和运行方式。 +PaddlePaddle目前唯一官方支持的运行的方式是Docker容器。因为Docker能在所有主要操作系统(包括Linux,Mac OS X和Windows)上运行。 请注意,您需要更改 `Dockers设置 `_ 才能充分利用Mac OS X和Windows上的硬件资源。 -下述内容将分为如下几个类别描述。 -* PaddlePaddle提供的Docker镜像版本 -* 下载和运行Docker镜像 -* 注意事项 +PaddlePaddle发布的docker镜像使用说明 +------------------------------ -PaddlePaddle提供的Docker镜像版本 --------------------------------- +对于每一个PaddlePaddle版本,我们都会发布两种Docker镜像:开发镜像、运行镜像。运行镜像包括纯CPU版本和GPU版本以及其对应的非AVX版本。 +我们会在 `dockerhub.com `_ 提供最新的docker镜像,可以在"tags"标签下找到最新的Paddle镜像版本。 +1. 开发镜像::code:`paddlepaddle/paddle:-dev` -我们提供了12个 `Docker image `_ ,他们的image name都是 :code:`paddledev/paddle` ,tag分别为 + 这个镜像包含了Paddle相关的开发工具以及编译和运行环境。用户可以使用开发镜像代替配置本地环境,完成开发,编译,发布, + 文档编写等工作。由于不同的Paddle的版本可能需要不同的依赖和工具,所以如果需要自行配置开发环境需要考虑版本的因素。 + 开发镜像包含了以下工具: + - gcc/clang + - nvcc + - Python + - sphinx + - woboq + - sshd + 很多开发者会使用远程的安装有GPU的服务器工作,用户可以使用ssh登录到这台服务器上并执行 :code:`docker exec`进入开发镜像并开始工作, + 也可以在开发镜像中启动一个SSHD服务,方便开发者直接登录到镜像中进行开发: -+-----------------+------------------+------------------------+-----------------------+ -| | normal | devel | demo | -+=================+==================+========================+=======================+ -| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest | -+-----------------+------------------+------------------------+-----------------------+ -| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest | -+-----------------+------------------+------------------------+-----------------------+ -| CPU WITHOUT AVX | cpu-noavx-latest | cpu-noavx-devel-latest | cpu-noavx-demo-latest | -+-----------------+------------------+------------------------+-----------------------+ -| GPU WITHOUT AVX | gpu-noavx-latest | gpu-noavx-devel-latest | gpu-noavx-demo-latest | -+-----------------+------------------+------------------------+-----------------------+ + 以交互容器方式运行开发镜像: -其中,横向包括三个版本,normal,devel和demo。 + .. code-block:: bash -* Normal: 正常的Docker image,只包括paddle的二进制 -* Devel: 包括Paddle的二进制、编译环境和源代码 -* Demo: 包括Paddle运行demo所需要的依赖 + docker run -it --rm paddledev/paddle:-dev /bin/bash -纵向包括四个版本,他们是。 + 或者,可以以后台进程方式运行容器: -* CPU: CPU版本。需要支持AVX指令集的CPU -* GPU: GPU版本。需要支持AVX指令集的CPU -* CPU WITHOUT AVX: CPU版本,不支持AVX指令集的CPU也可以运行 -* GPU WITHOUT AVX: GPU版本,不需要AVX指令集的CPU也可以运行。 + .. code-block:: bash -用户可以选择对应版本的docker image。使用如下脚本可以确定本机的CPU是否支持 :code:`AVX` 指令集\: + docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:-dev -.. code-block:: bash + 然后用密码 :code:`root` SSH进入容器: - if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi + .. 
code-block:: bash -如果输出 :code:`Support AVX`,则可以选择上表中的AVX版本PaddlePaddle。否则需要选择非AVX的PaddlePaddle。选择普通CPU版本的devel版本的image,则可以使用 :code:`paddledev/paddle:cpu-devel-latest` 来引用这个image。 + ssh -p 2202 root@localhost -PaddlePaddle提供的镜像并不包含任何命令运行,想要运行PaddlePaddle,您需要进入镜像运行PaddlePaddle -程序或者自定义一个含有启动脚本的image。具体请参考注意事项中的 :code:`使用ssh访问PaddlePaddle镜像` + SSH方式的一个优点是我们可以从多个终端进入容器。比如,一个终端运行vi,另一个终端运行Python。另一个好处是我们可以把PaddlePaddle容器运行在远程服务器上,并在笔记本上通过SSH与其连接。 -下载和运行Docker镜像 --------------------- +2. 运行镜像:根据CPU、GPU和非AVX区分了如下4个镜像: + - GPU/AVX::code:`paddlepaddle/paddle:-gpu` + - GPU/no-AVX::code:`paddlepaddle/paddle:-gpu-noavx` + - CPU/AVX::code:`paddlepaddle/paddle:` + - CPU/no-AVX::code:`paddlepaddle/paddle:-noavx` -为了运行PaddlePaddle的docker镜像,您需要在机器中安装好Docker。安装Docker需要您的机器 -至少具有3.10以上的linux kernel。安装方法请参考 -`Docker的官方文档 `_ 。如果您使用 -mac osx或者是windows机器,请参考 -`mac osx的安装文档 `_ 和 -`windows 的安装文档 `_ 。 + 纯CPU镜像以及GPU镜像都会用到AVX指令集,但是2008年之前生产的旧电脑不支持AVX。以下指令能检查Linux电脑是否支持AVX: -您可以使用 :code:`docker pull` 命令预先下载镜像,也可以直接执行 -:code:`docker run` 命令运行镜像。执行方法如下: + .. code-block:: bash -.. code-block:: bash - - $ docker run -it paddledev/paddle:cpu-latest + if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi -即可启动和进入PaddlePaddle的container。如果运行GPU版本的PaddlePaddle,则需要先将 -cuda相关的Driver和设备映射进container中,脚本类似于 + 如果输出是No,就需要选择使用no-AVX的镜像 -.. code-block:: bash + 以上方法在GPU镜像里也能用,只是请不要忘记提前在物理机上安装GPU最新驱动。 + 为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。 - $ export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" - $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - $ docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest + .. code-block:: bash -进入Docker container后,运行 :code:`paddle version` 即可打印出PaddlePaddle的版本和构建 -信息。安装完成的PaddlePaddle主体包括三个部分, :code:`paddle` 脚本, python的 -:code:`paddle` 包和 :code:`py_paddle` 包。其中\: + nvidia-docker run -it --rm paddledev/paddle:0.10.0rc1-gpu /bin/bash -* :code:`paddle` 脚本和 :code:`paddle` 的python包是PaddlePaddle的训练主要程序。使用 - :code:`paddle` 脚本可以启动PaddlePaddle的训练进程和pserver。而 :code:`paddle` 脚本 - 中的二进制使用了 :code:`paddle` 的python包来做配置文件解析等工作。 -* python包 :code:`py_paddle` 是一个swig封装的PaddlePaddle包,用来做预测和简单的定制化 - 训练。 + 注意: 如果使用nvidia-docker存在问题,你也许可以尝试更老的方法,具体如下,但是我们并不推荐这种方法。: -注意事项 --------- + .. code-block:: bash -性能问题 -++++++++ + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:-gpu -由于Docker是基于容器的轻量化虚拟方案,所以在CPU的运算性能上并不会有严重的影响。 -而GPU的驱动和设备全部映射到了容器内,所以GPU在运算性能上也不会有严重的影响。 +3. 使用运行镜像发布你的AI程序 + 假设您已经完成了一个AI训练的python程序 :code:`a.py`,这个程序是您在开发机上使用开发镜像完成开发。此时您可以运行这个命令在开发机上进行测试运行: -但是如果使用了高性能的网卡,例如RDMA网卡(RoCE 40GbE 或者 IB 56GbE),或者高性能的 -以太网卡 (10GbE)。推荐使用将本地网卡,即 "--net=host" 来进行训练。而不使用docker -的网桥来进行网络通信。 + .. code-block:: bash -远程访问问题和二次开发 -++++++++++++++++++++++ + docker run -it -v $PWD:/work paddle /work/a.py -由于PaddlePaddle的Docker镜像并不包含任何预定义的运行命令。所以如果想要在后台启用ssh -远程访问,则需要进行一定的二次开发,将ssh装入系统内并开启远程访问。二次开发可以 -使用Dockerfile构建一个全新的docker image。需要参考 -`Dockerfile的文档 `_ 和 -`Dockerfile的最佳实践 `_ -两个文档。 + 这里`a.py`包含的所有依赖假设都可以在Paddle的运行容器中。如果需要包含更多的依赖、或者需要发布您的应用的镜像,可以编写`Dockerfile`使用`FROM paddledev/paddle:` + 创建和发布自己的AI程序镜像。 -简单的含有ssh的Dockerfile如下: +运行PaddlePaddle书籍 +--------------------- -.. 
code-block:: bash +Jupyter Notebook是一个开源的web程序,大家可以通过它制作和分享带有代码、公式、图表、文字的交互式文档。用户可以通过网页浏览文档。 - FROM paddledev/paddle:cpu-latest +PaddlePaddle书籍是为用户和开发者制作的一个交互式的Jupyter Nodebook。 +如果您想要更深入了解deep learning,PaddlePaddle书籍一定是您最好的选择。 - MAINTAINER PaddlePaddle dev team +我们提供可以直接运行PaddlePaddle书籍的docker镜像,直接运行: - RUN apt-get update - RUN apt-get install -y openssh-server - RUN mkdir /var/run/sshd - RUN echo 'root:root' | chpasswd +.. code-block:: bash - RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config - RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config + docker run -p 8888:8888 paddlepaddle/book - EXPOSE 22 +然后在浏览器中输入以下网址: - CMD ["/usr/sbin/sshd", "-D"] +.. code-block:: text + http://localhost:8888/ -使用该Dockerfile构建出镜像,然后运行这个container即可。相关命令为\: +就这么简单,享受您的旅程! -.. code-block:: bash +通过Docker容器开发PaddlePaddle +------------------------------ - # cd到含有Dockerfile的路径中 - $ docker build . -t paddle_ssh - # 运行这个container,将宿主机的8022端口映射到container的22端口上 - $ docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh +开发人员可以在Docker开发镜像中开发PaddlePaddle。这样开发人员可以以一致的方式在不同的平台上工作 - Linux,Mac OS X和Windows。 -执行如下命令即可以关闭这个container,并且删除container中的数据\: +1. 构建开发镜像 -.. code-block:: bash - - # 关闭container - $ docker stop paddle_ssh_machine - # 删除container - $ docker rm paddle_ssh_machine + .. code-block:: bash -如果想要在外部机器访问这个container,即可以使用ssh访问宿主机的8022端口。用户名为 -root,密码也是root。命令为\: + git clone --recursive https://github.com/PaddlePaddle/Paddle + cd Paddle + docker build -t paddle:dev . -.. code-block:: bash - $ ssh -p 8022 root@YOUR_HOST_MACHINE + 请注意,默认情况下,:code:`docker build` 不会将源码导入到镜像中并编译它。如果我们想这样做,需要构建完开发镜像,然后执行: -至此,您就可以远程的使用PaddlePaddle啦。 + .. code-block:: bash + + docker run -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "TEST=OFF" paddle:dev + + +2. 运行开发环境 + + 当我们编译好了 :code:`paddle:dev`, 我们可以在docker容器里做开发,源代码可以通过挂载本地文件来被载入Docker的开发环境里面: + + .. code-block:: bash + + docker run -d -p 2202:22 -v $PWD:/paddle paddle:dev sshd + + 以上代码会启动一个带有PaddlePaddle开发环境的docker容器,源代码会被挂载到 :code:`/paddle` 。 + + 以上的 :code:`docker run` 命令其实会启动一个在2202端口监听的SSHD服务器。这样,我们就能SSH进入我们的开发容器了: + + .. code-block:: bash + + ssh root@localhost -p 2202 + +3. 在Docker开发环境中编译与安装PaddlPaddle代码 + + 当在容器里面的时候,可以用脚本 :code:`paddle/scripts/docker/build.sh` 来编译、安装与测试PaddlePaddle: + + .. code-block:: bash + + /paddle/paddle/scripts/docker/build.sh + + 以上指令会在 :code:`/paddle/build` 中编译PaddlePaddle。通过以下指令可以运行单元测试: + + .. code-block:: bash + + cd /paddle/build + ctest + + +文档 +---- + +Paddle的Docker开发镜像带有一个通过 `woboq code browser +`_ 生成的HTML版本的C++源代码,便于用户浏览C++源码。 + +只要在Docker里启动PaddlePaddle的时候给它一个名字,就可以再运行另一个Nginx Docker镜像来服务HTML代码: + +.. code-block:: bash + + docker run -d --name paddle-cpu-doc paddle:-dev + docker run -d --volumes-from paddle-cpu-doc -p 8088:80 nginx + +接着我们就能够打开浏览器在 http://localhost:8088/paddle/ 浏览代码。 diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index 5a1056e859a0c977c9cd365ff1e4ffe58596f41f..8fb9369e0e8e31e620169fa2856094c414efe23e 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -8,153 +8,255 @@ Please be aware that you will need to change `Dockers settings `_ to make full use of your hardware resource on Mac OS X and Windows. +Working With Docker +------------------- -Development Using Docker ------------------------- +Docker is simple as long as we understand a few basic concepts: -Developers can work on PaddlePaddle using Docker. 
This allows -developers to work on different platforms -- Linux, Mac OS X, and -Windows -- in a consistent way. +- *image*: A Docker image is a pack of software. It could contain one or more programs and all their dependencies. For example, the PaddlePaddle's Docker image includes pre-built PaddlePaddle and Python and many Python packages. We can run a Docker image directly, other than installing all these software. We can type -1. Build the Development Environment as a Docker Image + .. code-block:: bash - .. code-block:: bash + docker images - git clone --recursive https://github.com/PaddlePaddle/Paddle - cd Paddle - docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile . + to list all images in the system. We can also run + .. code-block:: bash + + docker pull paddlepaddle/paddle:0.10.0rc2 - Note that by default :code:`docker build` wouldn't import source - tree into the image and build it. If we want to do that, we need - to set a build arg: + to download a Docker image, paddlepaddle/paddle in this example, + from Dockerhub.com. - .. code-block:: bash +- *container*: considering a Docker image a program, a container is a + "process" that runs the image. Indeed, a container is exactly an + operating system process, but with a virtualized filesystem, network + port space, and other virtualized environment. We can type - docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile --build-arg BUILD_AND_INSTALL=ON . + .. code-block:: bash + docker run paddlepaddle/paddle:0.10.0rc2 -2. Run the Development Environment + to start a container to run a Docker image, paddlepaddle/paddle in this example. - Once we got the image :code:`paddle:dev`, we can use it to develop - Paddle by mounting the local source code tree into a container that - runs the image: +- By default docker container have an isolated file system namespace, + we can not see the files in the host file system. By using *volume*, + mounted files in host will be visible inside docker container. + Following command will mount current dirctory into /data inside + docker container, run docker container from debian image with + command :code:`ls /data`. - .. code-block:: bash + .. code-block:: bash + + docker run --rm -v $(pwd):/data debian ls /data + +Usage of CPU-only and GPU Images +---------------------------------- - docker run -d -p 2202:22 -v $PWD:/paddle paddle:dev +For each version of PaddlePaddle, we release two types of Docker images: +development image and production image. Production image includes +CPU-only version and a CUDA GPU version and their no-AVX versions. We +put the docker images on `dockerhub.com +`_. You can find the +latest versions under "tags" tab at dockerhub.com - This runs a container of the development environment Docker image - with the local source tree mounted to :code:`/paddle` of the - container. +1. Production images, this image might have multiple variants: - Note that the default entry-point of :code:`paddle:dev` is - :code:`sshd`, and above :code:`docker run` commands actually starts - an SSHD server listening on port 2202. This allows us to log into - this container with: + - GPU/AVX::code:`paddlepaddle/paddle:-gpu` + - GPU/no-AVX::code:`paddlepaddle/paddle:-gpu-noavx` + - CPU/AVX::code:`paddlepaddle/paddle:` + - CPU/no-AVX::code:`paddlepaddle/paddle:-noavx` + + Please be aware that the CPU-only and the GPU images both use the + AVX instruction set, but old computers produced before 2008 do not + support AVX. The following command checks if your Linux computer + supports AVX: .. 
code-block:: bash - ssh root@localhost -p 2202 + if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi - Usually, I run above commands on my Mac. I can also run them on a - GPU server :code:`xxx.yyy.zzz.www` and ssh from my Mac to it: + + To run the CPU-only image as an interactive container: .. code-block:: bash - my-mac$ ssh root@xxx.yyy.zzz.www -p 2202 + docker run -it --rm paddlepaddle/paddle:0.10.0rc2 /bin/bash + + Above method work with the GPU image too -- the recommended way is + using `nvidia-docker `_. -3. Build and Install Using the Development Environment + Please install nvidia-docker first following this `tutorial + `_. - Once I am in the container, I can use - :code:`paddle/scripts/docker/build.sh` to build, install, and test - Paddle: + Now you can run a GPU image: .. code-block:: bash - /paddle/paddle/scripts/docker/build.sh + nvidia-docker run -it --rm paddlepaddle/paddle:0.10.0rc2-gpu /bin/bash - This builds everything about Paddle in :code:`/paddle/build`. And - we can run unit tests there: +2. development image :code:`paddlepaddle/paddle:-dev` - .. code-block:: bash + This image has packed related develop tools and runtime + environment. Users and developers can use this image instead of + their own local computer to accomplish development, build, + releasing, document writing etc. While different version of paddle + may depends on different version of libraries and tools, if you + want to setup a local environment, you must pay attention to the + versions. The development image contains: + + - gcc/clang + - nvcc + - Python + - sphinx + - woboq + - sshd + + Many developers use servers with GPUs, they can use ssh to login to + the server and run :code:`docker exec` to enter the docker + container and start their work. Also they can start a development + docker image with SSHD service, so they can login to the container + and start work. - cd /paddle/build - ctest +Train Model Using Python API +---------------------------- -CPU-only and GPU Images ------------------------ +Our official docker image provides a runtime for PaddlePaddle +programs. The typical workflow will be as follows: -For each version of PaddlePaddle, we release 2 Docker images, a -CPU-only one and a CUDA GPU one. We do so by configuring -`dockerhub.com `_ -automatically runs the following commands: +Create a directory as workspace: .. code-block:: bash - docker build -t paddle:cpu -f paddle/scripts/docker/Dockerfile . - docker build -t paddle:gpu -f paddle/scripts/docker/Dockerfile.gpu . + mkdir ~/workspace +Edit a PaddlePaddle python program using your favourite editor + +.. code-block:: bash -To run the CPU-only image as an interactive container: + emacs ~/workspace/example.py + +Run the program using docker: .. code-block:: bash - docker run -it --rm paddledev/paddle:cpu-latest /bin/bash + docker run --rm -v ~/workspace:/workspace paddlepaddle/paddle:0.10.0rc2 python /workspace/example.py -or, we can run it as a daemon container +Or if you are using GPU for training: .. code-block:: bash - docker run -d -p 2202:22 paddledev/paddle:cpu-latest + nvidia-docker run --rm -v ~/workspace:/workspace paddlepaddle/paddle:0.10.0rc2-gpu python /workspace/example.py + +Above commands will start a docker container by running :code:`python +/workspace/example.py`. It will stop once :code:`python +/workspace/example.py` finishes. -and SSH to this container using password :code:`root`: +Another way is to tell docker to start a :code:`/bin/bash` session and +run PaddlePaddle program interactively: .. 
code-block:: bash - ssh -p 2202 root@localhost + docker run -it -v ~/workspace:/workspace paddlepaddle/paddle:0.10.0rc2 /bin/bash + # now we are inside docker container + cd /workspace + python example.py + +Running with GPU is identical: + +.. code-block:: bash -An advantage of using SSH is that we can connect to PaddlePaddle from -more than one terminals. For example, one terminal running vi and -another one running Python interpreter. Another advantage is that we -can run the PaddlePaddle container on a remote server and SSH to it -from a laptop. + nvidia-docker run -it -v ~/workspace:/workspace paddlepaddle/paddle:0.10.0rc2-gpu /bin/bash + # now we are inside docker container + cd /workspace + python example.py -Above methods work with the GPU image too -- just please don't forget -to install CUDA driver and let Docker knows about it: +Develop PaddlePaddle or Train Model Using C++ API +--------------------------------------------------- + +We will be using PaddlePaddle development image since it contains all +compiling tools and dependencies. + +Let's clone PaddlePaddle repo first: .. code-block:: bash - export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" - export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest + git clone https://github.com/PaddlePaddle/Paddle.git && cd Paddle + +Mount both workspace folder and paddle code folder into docker +container, so we can access them inside docker container. There are +two ways of using PaddlePaddle development docker image: + +- run interactive bash directly + .. code-block:: bash -Non-AVX Images --------------- + # use nvidia-docker instead of docker if you need to use GPU + docker run -it -v ~/workspace:/workspace -v $(pwd):/paddle paddlepaddle/paddle:0.10.0rc2-dev /bin/bash + # now we are inside docker container -Please be aware that the CPU-only and the GPU images both use the AVX -instruction set, but old computers produced before 2008 do not support -AVX. The following command checks if your Linux computer supports -AVX: +- or, we can run it as a daemon container + + .. code-block:: bash + + # use nvidia-docker instead of docker if you need to use GPU + docker run -d -p 2202:22 -p 8888:8888 -v ~/workspace:/workspace -v $(pwd):/paddle paddlepaddle/paddle:0.10.0rc2-dev /usr/sbin/sshd -D + + and SSH to this container using password :code:`root`: + + .. code-block:: bash + + ssh -p 2202 root@localhost + + An advantage is that we can run the PaddlePaddle container on a + remote server and SSH to it from a laptop. + +When developing PaddlePaddle, you can edit PaddlePaddle source code +from outside of docker container using your favoriate editor. To +compile PaddlePaddle, run inside container: + +.. code-block:: bash + + WITH_GPU=OFF WITH_AVX=ON WITH_TEST=ON bash /paddle/paddle/scripts/docker/build.sh + +This builds everything about Paddle in :code:`/paddle/build`. And we +can run unit tests there: .. code-block:: bash - if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi + cd /paddle/build + ctest + +When training model using C++ API, we can edit paddle program in +~/workspace outside of docker. And build from /workspace inside of +docker. 
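+
+As a concrete illustration of the "Train Model Using Python API" workflow
+above, :code:`example.py` could be a small linear regression program like
+the sketch below. This is only an assumption of what a user program may
+look like; it uses v2 API names referenced elsewhere in these docs
+(:code:`paddle.v2.trainer`, :code:`paddle.v2.dataset.uci_housing`), and
+exact signatures may differ between PaddlePaddle versions:
+
+.. code-block:: python
+
+    import paddle.v2 as paddle
+
+    # Initialize PaddlePaddle for CPU-only training (assumed flags).
+    paddle.init(use_gpu=False, trainer_count=1)
+
+    # A linear model over the 13 UCI housing features.
+    x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
+    y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
+    y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
+    cost = paddle.layer.mse_cost(input=y_predict, label=y)
+
+    parameters = paddle.parameters.create(cost)
+    optimizer = paddle.optimizer.Momentum(momentum=0)
+    trainer = paddle.trainer.SGD(cost=cost,
+                                 parameters=parameters,
+                                 update_equation=optimizer)
+
+    # Train on the built-in UCI housing dataset for a few passes.
+    trainer.train(
+        reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=2),
+        feeding={'x': 0, 'y': 1},
+        num_passes=10)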
+ +PaddlePaddle Book +------------------ + +The Jupyter Notebook is an open-source web application that allows +you to create and share documents that contain live code, equations, +visualizations and explanatory text in a single browser. +PaddlePaddle Book is an interactive Jupyter Notebook for users and developers. +We already exposed port 8888 for this book. If you want to +dig deeper into deep learning, PaddlePaddle Book definitely is your best choice. -If it doesn't, we will need to build non-AVX images manually from -source code: +We provide a packaged book image, simply issue the command: .. code-block:: bash - cd ~ - git clone https://github.com/PaddlePaddle/Paddle.git - cd Paddle - docker build --build-arg WITH_AVX=OFF -t paddle:cpu-noavx -f paddle/scripts/docker/Dockerfile . - docker build --build-arg WITH_AVX=OFF -t paddle:gpu-noavx -f paddle/scripts/docker/Dockerfile.gpu . + docker run -p 8888:8888 paddlepaddle/book + +Then, you would back and paste the address into the local browser: + +.. code-block:: text + + http://localhost:8888/ + +That's all. Enjoy your journey! Documentation @@ -171,7 +273,7 @@ container: .. code-block:: bash - docker run -d --name paddle-cpu-doc paddle:cpu + docker run -d --name paddle-cpu-doc paddle: docker run -d --volumes-from paddle-cpu-doc -p 8088:80 nginx diff --git a/doc/getstarted/build_and_install/ubuntu_install_cn.rst b/doc/getstarted/build_and_install/ubuntu_install_cn.rst index d02d9c63bbfb50954d7b75f2c685ce167a3b7146..9e39ccb00f5d5655c30148900a3d76a22aacfc01 100644 --- a/doc/getstarted/build_and_install/ubuntu_install_cn.rst +++ b/doc/getstarted/build_and_install/ubuntu_install_cn.rst @@ -46,7 +46,6 @@ PaddlePaddle提供了ubuntu 14.04 deb安装包。 with_double: OFF with_python: ON with_rdma: OFF - with_metric_learning: with_timer: OFF with_predict_sdk: diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index bd3d0ec292057037414792b1ac176d12605b90d5..5b84eea491f874459ed2071e4c942657cdc9b18b 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -10,6 +10,7 @@ usage/cmd_parameter/index_cn.rst usage/concepts/use_concepts_cn.rst usage/cluster/cluster_train_cn.md + usage/k8s/k8s_basis_cn.md usage/k8s/k8s_cn.md usage/k8s/k8s_distributed_cn.md diff --git a/doc/howto/usage/cluster/cluster_train_cn.md b/doc/howto/usage/cluster/cluster_train_cn.md index acdcfa1c0047ced85c0a9c53d691edc0b4489336..274452fbf0c595ad7b4dbeffe85ad9038f12b458 100644 --- a/doc/howto/usage/cluster/cluster_train_cn.md +++ b/doc/howto/usage/cluster/cluster_train_cn.md @@ -6,7 +6,7 @@ 在本文中,我们将阐释如何在集群上运行分布式 Paddle 训练作业。我们将以[推荐系统](https://github.com/baidu/Paddle/tree/develop/demo/recommendation)为例创建分布式的单进程训练。 -在本文中使用的[脚本](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train)通过 SSH 运行分布式作业。 它们还可以供那些运行更复杂的集群管理系统(如 MPI 和 [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/k8s) )的用户参考。 +在本文中使用的[脚本](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train)通过 SSH 运行分布式作业。 它们还可以供那些运行更复杂的集群管理系统(如 MPI 和 [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/k8s) )的用户参考。 ## 前提条件 diff --git a/doc/howto/usage/cluster/cluster_train_en.md b/doc/howto/usage/cluster/cluster_train_en.md index 30963dcd927250651f3ed0b39949f541cc28ed4a..c60876721cbf5565d6e48c8061811aacada748cd 100644 --- a/doc/howto/usage/cluster/cluster_train_en.md +++ b/doc/howto/usage/cluster/cluster_train_en.md @@ -2,7 +2,7 @@ In this article, we explain how to run distributed Paddle training jobs on clusters. 
We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation). -[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/k8s). +[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/k8s). ## Prerequisite diff --git a/doc/howto/usage/cmd_parameter/arguments_cn.md b/doc/howto/usage/cmd_parameter/arguments_cn.md index 2e2a2fcc54a09f4f41e4ebbc317e1409591ddd9c..f7aa525054468670f59309ddf9206af55bb77869 100644 --- a/doc/howto/usage/cmd_parameter/arguments_cn.md +++ b/doc/howto/usage/cmd_parameter/arguments_cn.md @@ -228,16 +228,6 @@ √√ - -度量学习(metric learning)external -√√√√ - - - -data_server_port -√√ - - 参数服务器(PServer)start_pserver √√ diff --git a/doc/howto/usage/cmd_parameter/arguments_en.md b/doc/howto/usage/cmd_parameter/arguments_en.md index e5546f0ddc78a9f8bdc306a19c2fe9a415463e5a..d1963067bda949b11ececefed3db7db1432c6223 100644 --- a/doc/howto/usage/cmd_parameter/arguments_en.md +++ b/doc/howto/usage/cmd_parameter/arguments_en.md @@ -228,16 +228,6 @@ It looks like there are a lot of arguments. However, most of them are for develo √√ - -metric learningexternal -√√√√ - - - -data_server_port -√√ - - PServerstart_pserver √√ diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md index 3b573a324d541b024600a254d5266e517db229c5..b4625ba68cf23e5697554ba94efaf0b873f2c1de 100644 --- a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md +++ b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md @@ -180,15 +180,6 @@  - 用户可以自定义beam search的方法,编译成动态库,供PaddlePaddle加载。 该参数用于指定动态库路径. - 类型: string (默认: "", null). -## 度量学习(Metric Learning) -* `--external` - - 指示是否使用外部机器进行度量学习. - - 类型: bool (默认: 0). - -* `--data_server_port` - - 数据服务器(data server)的监听端口,主要用在度量学习中. - - 类型: int32 (默认: 21134). - ## 数据支持(DataProvider) * `--memory_threshold_on_load_data` diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_en.md b/doc/howto/usage/cmd_parameter/detail_introduction_en.md index 33b7ec0d51a96ee126197e7aa819fdae0d3dc353..b681ebc81a355dfc1a7638a4463dff6979929a45 100644 --- a/doc/howto/usage/cmd_parameter/detail_introduction_en.md +++ b/doc/howto/usage/cmd_parameter/detail_introduction_en.md @@ -184,15 +184,6 @@ - Specify shared dynamic library. It can be defined out of paddle by user. - type: string (default: "", null). -## Metric Learning -* `--external` - - Whether to use external machine for metric learning. - - type: bool (default: 0). - -* `--data_server_port` - - Listening port for dserver (data server), dserver is mainly used in metric learning. - - type: int32 (default: 21134). 
- ## DataProvider * `--memory_threshold_on_load_data` diff --git a/doc/howto/usage/k8s/k8s_basis_cn.md b/doc/howto/usage/k8s/k8s_basis_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..6278dacb17a378da660b2f5434247efd41c995fc --- /dev/null +++ b/doc/howto/usage/k8s/k8s_basis_cn.md @@ -0,0 +1,75 @@ +# Kubernetes 简介 + +[*Kubernetes*](http://kubernetes.io/)是Google开源的容器集群管理系统,其提供应用部署、维护、扩展机制等功能,利用Kubernetes能方便地管理跨机器运行容器化的应用。Kubernetes可以在物理机或虚拟机上运行,且支持部署到[AWS](http://kubernetes.io/docs/getting-started-guides/aws),[Azure](http://kubernetes.io/docs/getting-started-guides/azure/),[GCE](http://kubernetes.io/docs/getting-started-guides/gce)等多种公有云环境。介绍分布式训练之前,需要对[Kubernetes](http://kubernetes.io/)有一个基本的认识,下面先简要介绍一下本文用到的几个Kubernetes概念。 + +- [*Node*](http://kubernetes.io/docs/admin/node/) 表示一个Kubernetes集群中的一个工作节点,这个节点可以是物理机或者虚拟机,Kubernetes集群就是由node节点与master节点组成的。 + +- [*Pod*](http://kubernetes.io/docs/user-guide/pods/) 是一组(一个或多个)容器,pod是Kubernetes的最小调度单元,一个pod中的所有容器会被调度到同一个node上。Pod中的容器共享NET,PID,IPC,UTS等Linux namespace。由于容器之间共享NET namespace,所以它们使用同一个IP地址,可以通过*localhost*互相通信。不同pod之间可以通过IP地址访问。 + +- [*Job*](http://kubernetes.io/docs/user-guide/jobs/) 描述Kubernetes上运行的作业,一次作业称为一个job,通常每个job包括一个或者多个pods,job启动后会创建这些pod并开始执行一个程序,等待这个程序执行成功并返回0则成功退出,如果执行失败,也可以配置不同的重试机制。 + +- [*Volume*](http://kubernetes.io/docs/user-guide/volumes/) 存储卷,是pod内的容器都可以访问的共享目录,也是容器与node之间共享文件的方式,因为容器内的文件都是暂时存在的,当容器因为各种原因被销毁时,其内部的文件也会随之消失。通过volume,就可以将这些文件持久化存储。Kubernetes支持多种volume,例如hostPath(宿主机目录),gcePersistentDisk,awsElasticBlockStore等。 + +- [*Namespaces*](https://kubernetes.io/docs/user-guide/namespaces/) 命名空间,在kubernetes中创建的所有资源对象(例如上文的pod,job)等都属于一个命名空间,在同一个命名空间中,资源对象的名字是唯一的,不同空间的资源名可以重复,命名空间主要为了对象进行逻辑上的分组便于管理。本文只使用了默认命名空间。 + +- [*PersistentVolume*](https://kubernetes.io/docs/user-guide/persistent-volumes/): 和[*PersistentVolumeClaim*](https://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims)结合,将外部的存储服务在Kubernetes中描述成为统一的资源形式,便于存储资源管理和Pod引用。 + +# 部署Kubernetes集群 + +Kubernetes提供了多种集群部署的方案,本文档内不重复介绍。这里给出集中常见的部署方法: + +- [*minikube*](https://kubernetes.io/docs/getting-started-guides/minikube/): 快速在本地启动一个单机的kubernetes服务器,便于本地验证和测试。 +- [*kubeadm*](http://kubernetes.io/docs/getting-started-guides/kubeadm/): 在不同操作系统,不同主机(Bare-Metal, AWS, GCE)条件下,快速部署集群。 +- [*AWS EC2*](https://kubernetes.io/docs/getting-started-guides/aws/): 在aws上快速部署集群。 +- [*Bare-Metal*](https://kubernetes.io/docs/getting-started-guides/centos/centos_manual_config/): 在物理机上手动部署。 + +可以参考[这个表格](https://kubernetes.io/docs/getting-started-guides/#table-of-solutions)选择适合您的场景的合适方案。 + +# 选择存储方案 + +容器不会保留在运行时生成的数据,job或者应用程序在容器中运行时生成的数据会在容器销毁时消失。为了完成分布式机器学习训练任务,需要有一个外部的存储服务来保存训练所需数据和训练输出。 +常见的可选存储服务包括: + +- [*NFS*](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/nfs): 可以将磁盘上某个目录共享给网络中其他机器访问。部署和配置比较简单,可以用于小量数据的验证。不提供分布式存储,高可用,冗余等功能。NFS的部署方法可以参考[这里](http://www.tecmint.com/how-to-setup-nfs-server-in-linux/)。 +- [*GlusterFS*](http://gluster.readthedocs.io/en/latest/Quick-Start-Guide/Quickstart/): 网络分布式文件系统,可以在Kubernetes中按照[这个](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/glusterfs)例子使用。 +- [*Ceph*](http://docs.ceph.com/docs/master/): 分布式文件系统,支持rbd,POSIX API接口(ceph fs)和对象存储API,参考[这里](https://kubernetes.io/docs/user-guide/volumes/#rbd)。 +- [*MooseFS*](https://moosefs.com/documentation.html): 一个分布式的存储系统。需要先挂载到服务器Node上再通过kubernetes hostPath Volume挂载到容器中。 + +# 配置kubectl + +## 安装kubectl +``` +# OS X +curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s 
https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl + +# Linux +curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl + +# Windows +curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/windows/amd64/kubectl.exe +``` + +## 配置kubectl访问你的kubernetes集群 + +编辑`~/.kube/config`这个配置文件,修改`Master-IP`的地址。如果使用SSL认证,则需要配置`certificate-authority`和`users`中的用户证书。如果是使用非SSL方式访问(比如通过8080端口),也可以去掉这些证书的配置。 +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority: /path/to/ca.crt + server: https://[Master-IP]:443 + name: minikube +contexts: +- context: + cluster: minikube + user: minikube + name: minikube +current-context: minikube +kind: Config +preferences: {} +users: +- name: minikube + user: + client-certificate: /path/to/apiserver.crt + client-key: /Users/wuyi/.minikube/apiserver.key +``` diff --git a/doc/howto/usage/k8s/k8s_distributed_cn.md b/doc/howto/usage/k8s/k8s_distributed_cn.md index 2063b98ca8aab9c348fe2b53bb1e6d96b7750dd3..3121b3f59df650c0a22d0bd305a6f793b202d30e 100644 --- a/doc/howto/usage/k8s/k8s_distributed_cn.md +++ b/doc/howto/usage/k8s/k8s_distributed_cn.md @@ -2,181 +2,96 @@ 前一篇文章介绍了如何在Kubernetes集群上启动一个单机PaddlePaddle训练作业 (Job)。在这篇文章里,我们介绍如何在Kubernetes集群上进行分布式PaddlePaddle训练作业。关于PaddlePaddle的分布式训练,文章 [Cluster Training](https://github.com/baidu/Paddle/blob/develop/doc/cluster/opensource/cluster_train.md)介绍了一种通过SSH远程分发任务,进行分布式训练的方法,与此不同的是,本文将介绍在Kubernetes容器管理平台上快速构建PaddlePaddle容器集群,进行分布式训练的方案。 -## Kubernetes 基本概念 - -[*Kubernetes*](http://kubernetes.io/)是Google开源的容器集群管理系统,其提供应用部署、维护、 扩展机制等功能,利用Kubernetes能方便地管理跨机器运行容器化的应用。Kubernetes可以在物理机或虚拟机上运行,且支持部署到[AWS](http://kubernetes.io/docs/getting-started-guides/aws),[Azure](http://kubernetes.io/docs/getting-started-guides/azure/),[GCE](http://kubernetes.io/docs/getting-started-guides/gce)等多种公有云环境。介绍分布式训练之前,需要对[Kubernetes](http://kubernetes.io/)有一个基本的认识,下面先简要介绍一下本文用到的几个Kubernetes概念。 - -- [*Node*](http://kubernetes.io/docs/admin/node/) 表示一个Kubernetes集群中的一个工作节点,这个节点可以是物理机或者虚拟机,Kubernetes集群就是由node节点与master节点组成的。 - -- [*Pod*](http://kubernetes.io/docs/user-guide/pods/) 是一组(一个或多个)容器,pod是Kubernetes的最小调度单元,一个pod中的所有容器会被调度到同一个node上。Pod中的容器共享NET,PID,IPC,UTS等Linux namespace。由于容器之间共享NET namespace,所以它们使用同一个IP地址,可以通过*localhost*互相通信。不同pod之间可以通过IP地址访问。 - -- [*Job*](http://kubernetes.io/docs/user-guide/jobs/) 是Kubernetes上运行的作业,一次作业称为一个job,通常每个job包括一个或者多个pods。 - -- [*Volume*](http://kubernetes.io/docs/user-guide/volumes/) 存储卷,是pod内的容器都可以访问的共享目录,也是容器与node之间共享文件的方式,因为容器内的文件都是暂时存在的,当容器因为各种原因被销毁时,其内部的文件也会随之消失。通过volume,就可以将这些文件持久化存储。Kubernetes支持多种volume,例如hostPath(宿主机目录),gcePersistentDisk,awsElasticBlockStore等。 - -- [*Namespaces*](http://kubernetes.io/docs/user-guide/volumes/) 命名空间,在kubernetes中创建的所有资源对象(例如上文的pod,job)等都属于一个命名空间,在同一个命名空间中,资源对象的名字是唯一的,不同空间的资源名可以重复,命名空间主要为了对象进行逻辑上的分组便于管理。本文只使用了默认命名空间。 +有关Kubernetes相关概念以及如何搭建和配置Kubernetes集群,可以参考[k8s_basis](./k8s_basis_cn.md)。 ## 整体方案 -### 部署Kubernetes集群 - -首先,我们需要拥有一个Kubernetes集群,在这个集群中所有node与pod都可以互相通信。关于Kubernetes集群搭建,可以参考[官方文档](http://kubernetes.io/docs/getting-started-guides/kubeadm/),在以后的文章中我们也会介绍AWS上搭建的方案。本文假设大家能找到几台物理机,并且可以按照官方文档在上面部署Kubernetes。在本文的环境中,Kubernetes集群中所有node都挂载了一个[MFS](http://moosefs.org/)(Moose filesystem,一种分布式文件系统)共享目录,我们通过这个目录来存放训练文件与最终输出的模型。关于MFS的安装部署,可以参考[MooseFS 
documentation](https://moosefs.com/documentation.html)。在训练之前,用户将配置与训练数据切分好放在MFS目录中,训练时,程序从此目录拷贝文件到容器内进行训练,将结果保存到此目录里。整体的结构图如下: +在训练之前,用户将配置与训练数据切分好放在分布式文件系统预先分配好的目录中(不同的分布式文件系统,需要使用其制定的方式挂载后并导入数据),训练时,程序从此目录拷贝文件到容器内进行训练,将结果保存到此目录里。整体的结构图如下: ![paddle on kubernetes结构图](src/k8s-paddle-arch.png) -上图描述了一个3节点的分布式训练场景,Kubernetes集群的每个node上都挂载了一个MFS目录,这个目录可以通过volume的形式挂载到容器中。Kubernetes为这次训练创建了3个pod并且调度到了3个node上运行,每个pod包含一个PaddlePaddle容器。在容器创建后,会启动pserver与trainer进程,读取volume中的数据进行这次分布式训练。 - -### 使用 Job - -我们使用Kubernetes中的job这个概念来代表一次分布式训练。Job表示一次性作业,在作业完成后,Kubernetes会销毁job产生的容器并且释放相关资源。 - -在Kubernetes中,可以通过编写一个YAML文件,来描述这个job,在这个文件中,主要包含了一些配置信息,例如PaddlePaddle的节点个数,`paddle pserver`开放的端口个数与端口号,使用的网卡设备等,这些信息通过环境变量的形式传递给容器内的程序使用。 - -在一次分布式训练中,用户确定好本次训练需要的PaddlePaddle节点个数,将切分好的训练数据与配置文件上传到MFS共享目录中。然后编写这次训练的job YAML文件,提交给Kubernetes集群创建并开始作业。 - -### 创建PaddlePaddle节点 +上图描述了一个3节点的分布式训练场景,在每个Pod上都通过volume方式挂载分布式文件系统的一个目录用于保存训练数据和输出结果。Kubernetes为这次训练创建了3个pod并且调度到了3个node上运行,每个pod包含一个PaddlePaddle容器。在容器创建后,会启动pserver与trainer进程,读取volume中的数据进行这次分布式训练。 -当Kubernetes master收到请求,解析完YAML文件后,会创建出多个pod(个数为PaddlePaddle节点数),Kubernetes会把这些pod调度到集群的node上运行。一个pod就代表一个PaddlePaddle节点,当pod被成功分配到一台物理/虚拟机上后,Kubernetes会启动pod内的容器,这个容器会根据YAML文件中的环境变量,启动`paddle pserver`与`paddle train`进程。 +根据前文的描述,要在已有的Kubernetes集群上进行PaddlePaddle的分布式训练,按照下面步骤即可: -### 启动训练 - -在容器启动后,会通过脚本来启动这次分布式训练,我们知道`paddle train`进程启动时需要知道其他节点的IP地址以及本节点的trainer_id,由于PaddlePaddle本身不提供类似服务发现的功能,所以在本文的启动脚本中,每个节点会根据job name向Kubernetes apiserver查询这个job对应的所有pod信息(Kubernetes默认会在每个容器的环境变量中写入apiserver的地址)。 - -根据这些pod信息,就可以通过某种方式,为每个pod分配一个唯一的trainer_id。本文把所有pod的IP地址进行排序,将顺序作为每个PaddlePaddle节点的trainer_id。启动脚本的工作流程大致如下: - - 1. 查询Kubernetes apiserver获取pod信息,根据IP分配trainer_id - 1. 从MFS共享目录中拷贝训练文件到容器内 - 1. 根据环境变量,解析出`paddle pserver`与`paddle train`的启动参数,启动进程 - 1. 训练时,PaddlePaddle会自动将结果保存在trainer_id为0的节点上,将输出路径设置为MFS目录,保存输出的文件 - - -## 搭建过程 - -根据前文的描述,要在已有的Kubernetes集群上进行PaddlePaddle的分布式训练,主要分为以下几个步骤: - -1. 制作PaddlePaddle镜像 -1. 将训练文件与切分好的数据上传到共享存储 -1. 编写本次训练的YAML文件,创建一个Kubernetes job -1. 训练结束后查看输出结果 +1. [制作PaddlePaddle镜像](#制作镜像) +1. [将训练文件与切分好的数据上传到共享存储](#上传训练文件) +1. [编写本次训练的YAML文件,创建一个Kubernetes job](#创建Job) +1. 
[训练结束后查看输出结果](#查看输出) 下面就根据这几个步骤分别介绍。 - ### 制作镜像 PaddlePaddle镜像需要提供`paddle pserver`与`paddle train`进程的运行环境,用这个镜像创建的容器需要有以下两个功能: - 拷贝训练文件到容器内 - - 生成`paddle pserver`与`paddle train`进程的启动参数,并且启动训练 -因为官方镜像 `paddledev/paddle:cpu-latest` 内已经包含PaddlePaddle的执行程序但是还没上述功能,所以我们可以在这个基础上,添加启动脚本,制作新镜像来完成以上的工作。镜像的*Dockerfile*如下: - -```Dockerfile -FROM paddledev/paddle:cpu-latest - -MAINTAINER zjsxzong89@gmail.com - -COPY start.sh /root/ -COPY start_paddle.py /root/ -CMD ["bash"," -c","/root/start.sh"] -``` - -[start.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/k8s/start.sh)文件拷贝训练文件到容器内,然后执行[start_paddle.py](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/k8s/start_paddle.py)脚本启动训练,前文提到的获取其他节点IP地址,分配`trainer_id`等都在`start_paddle.py`脚本中完成。 - -`start_paddle.py`脚本开始时,会先进行参数的初始化与解析。 - -```python -parser = argparse.ArgumentParser(prog="start_paddle.py", - description='simple tool for k8s') - args, train_args_list = parser.parse_known_args() - train_args = refine_unknown_args(train_args_list) - train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2])) - podlist = getPodList() -``` - -然后通过函数`getPodList()`访问Kubernetes的接口来查询此job对应的所有pod信息。当所有pod都处于running状态(容器运行都运行)时,再通过函数`getIdMap(podlist)`获取trainer_id。 - -```python - podlist = getPodList() - # need to wait until all pods are running - while not isPodAllRunning(podlist): - time.sleep(10) - podlist = getPodList() - idMap = getIdMap(podlist) -``` - -在函数`getIdMap(podlist)`内部,我们通过读取`podlist`中每个pod的IP地址,将IP排序生成的序号作为trainer_id。 - -```python -def getIdMap(podlist): - ''' - generate tainer_id by ip - ''' - ips = [] - for pod in podlist["items"]: - ips.append(pod["status"]["podIP"]) - ips.sort() - idMap = {} - for i in range(len(ips)): - idMap[ips[i]] = i - return idMap -``` - -在得到`idMap`后,通过函数`startPaddle(idMap, train_args_dict)`构造`paddle pserver`与`paddle train`的启动参数并执行进程。 - -在函数`startPaddle`中,最主要的工作就是解析出`paddle pserver`与`paddle train`的启动参数。例如`paddle train`参数的解析,解析环境变量得到`PADDLE_NIC`,`PADDLE_PORT`,`PADDLE_PORTS_NUM`等参数,然后通过自身的IP地址在`idMap`中获取`trainerId`。 - -```python - program = 'paddle train' - args = " --nics=" + PADDLE_NIC - args += " --port=" + str(PADDLE_PORT) - args += " --ports_num=" + str(PADDLE_PORTS_NUM) - args += " --comment=" + "paddle_process_by_paddle" - ip_string = "" - for ip in idMap.keys(): - ip_string += (ip + ",") - ip_string = ip_string.rstrip(",") - args += " --pservers=" + ip_string - args_ext = "" - for key, value in train_args_dict.items(): - args_ext += (' --' + key + '=' + value) - localIP = socket.gethostbyname(socket.gethostname()) - trainerId = idMap[localIP] - args += " " + args_ext + " --trainer_id=" + \ - str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT -``` - -使用 `docker build` 构建镜像: +因为官方镜像 `paddledev/paddle:cpu-latest` 内已经包含PaddlePaddle的执行程序但是还没上述功能,所以我们可以在这个基础上,添加启动脚本,制作新镜像来完成以上的工作。参考镜像的[*Dockerfile*](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/k8s/src/k8s_train/Dockerfile)。 ```bash -docker build -t your_repo/paddle:mypaddle . +$ cd doc/howto/usage/k8s/src/k8s_train +$ docker build -t [YOUR_REPO]/paddle:mypaddle . 
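+# 编者注:下面一行是补充的示例(原文没有),用于在 push 之前确认镜像已构建成功:
+$ docker images | grep mypaddle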
``` 然后将构建成功的镜像上传到镜像仓库。 ```bash -docker push your_repo/paddle:mypaddle +docker push [YOUR_REPO]/paddle:mypaddle ``` -注意上述命令中`your_repo`表示读者所使用的Docker镜像仓库地址,读者需要替换成自己使用的仓库地址。下文使用`your_repo/paddle:mypaddle`这个地址来表示此步骤所构建出的镜像。 +注意上述命令中`[YOUR_REPO]`表示读者所使用的Docker镜像仓库地址,读者需要替换成自己使用的仓库地址。下文使用`[YOUR_REPO]/paddle:mypaddle`这个地址来表示此步骤所构建出的镜像。 -### 上传训练文件 +### 准备训练数据 -本文使用PaddlePaddle官方的[recommendation demo](http://www.paddlepaddle.org/doc/demo/index.html#recommendation)作为这次训练的内容,我们将训练文件与数据放在一个job name命名的目录中,上传到MFS共享存储。完成后MFS上的文件内容大致如下: +这里我们通过在Kubernetes集群上启动一个Job来下载并切割数据,也可以通过修改[k8s_train](./src/k8s_train/README.md)的内容来定制image。 -```bash -[root@paddle-kubernetes-node0 mfs]# tree -d +在启动Job之前,需要根据不同的分布式存储来绑定一个[persistentVolumeClaim](https://kubernetes.io/docs/user-guide/persistent-volumes/),生成的数据将会存储在这个volume下。 + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: paddle-data +spec: + template: + metadata: + name: pi + spec: + hostNetwork: true + containers: + - name: paddle-data + image: paddledev/paddle-tutorial:k8s_data + imagePullPolicy: Always + volumeMounts: + - mountPath: "/mnt" + name: nfs + env: + - name: OUT_DIR + value: /home/work/mfs/paddle-cluster-job + - name: SPLIT_COUNT + value: "3" + volumes: + - name: nfs + persistentVolumeClaim: + claimName: mfs + restartPolicy: Never +``` + +完成后volume中的文件内容大致如下: +```bash +[root@paddle-kubernetes-node0 nfsdir]$ tree -d . -└── paddle-cluster-job - ├── data - │   ├── 0 - │   │ - │   ├── 1 - │   │ - │   └── 2 - ├── output - └── recommendation +`-- paddle-cluster-job + |-- 0 + | `-- data + |-- 1 + | `-- data + |-- 2 + | `-- data + |-- output + |-- quick_start ``` 目录中paddle-cluster-job是本次训练对应的job name,本次训练要求有3个PaddlePaddle节点,文件夹0,1,2分别代表3个节点的trainer_id,各节点切分好的数据存放在对应编号目录的data文件夹中。quick_start文件夹内存放训练文件,output文件夹存放训练结果与日志。 @@ -205,7 +120,7 @@ spec: path: /home/work/mfs containers: - name: trainer - image: your_repo/paddle:mypaddle + image: [YOUR_REPO]/paddle:mypaddle command: ["bin/bash", "-c", "/root/start.sh"] env: - name: JOB_NAME @@ -236,15 +151,16 @@ spec: `env`字段表示容器的环境变量,我们将`paddle`运行的一些参数通过这种方式传递到容器内。 -`JOB_PATH`表示共享存储挂载的路径,`JOB_NAME`表示job名字,`TRAIN_CONFIG_DIR`表示本次训练文件所在目录,这三个变量组合就可以找到本次训练需要的文件路径。 - -`CONF_PADDLE_NIC`表示`paddle pserver`进程需要的`--nics`参数,即网卡名 - -`CONF_PADDLE_PORT`表示`paddle pserver`的`--port`参数,`CONF_PADDLE_PORTS_NUM`则表示稠密更新的端口数量,也就是`--ports_num`参数。 - -`CONF_PADDLE_PORTS_NUM_SPARSE`表示稀疏更新的端口数量,也就是`--ports_num_for_sparse`参数。 - -`CONF_PADDLE_GRADIENT_NUM`表示训练节点数量,即`--num_gradient_servers`参数 +环境变量 | 说明 +--- | --- +JOB_PATH | 共享存储挂载的路径 +JOB_NAME | Job的名字 +TRAIN_CONFIG_DIR | 本次训练文件所在目录,与JOB_PATH,JOB_NAME组合可以找到本次训练需要的文件路径 +CONF_PADDLE_NIC | `paddle pserver`进程需要的`--nics`参数,即网卡名 +CONF_PADDLE_PORT | `paddle pserver`的`--port`参数 +CONF_PADDLE_PORTS_NUM | 稠密更新的端口数量,即`--ports_num`参数 +CONF_PADDLE_PORTS_NUM_SPARSE | 稀疏更新的端口数量,即`--ports_num_for_sparse`参数 +CONF_PADDLE_GRADIENT_NUM | 训练节点数量,即`--num_gradient_servers`参数 这些参数的具体描述,读者可以查看[这里](http://www.paddlepaddle.org/doc/ui/cmd_argument/detail_introduction.html#parameter-server-and-distributed-communication)。 @@ -289,15 +205,15 @@ I1116 09:10:17.123121 50 Util.cpp:155] commandline: --ports_num=2 --comment=paddle_process_by_paddle --pservers=192.168.129.66,192.168.223.143,192.168.129.71 --ports_num_for_sparse=2 --config=./trainer_config.py - --trainer_count=4 --num_passes=10 --use_gpu=0 - --log_period=50 --dot_period=10 --saving_period=1 + --trainer_count=4 --num_passes=10 --use_gpu=0 + --log_period=50 --dot_period=10 --saving_period=1 --local=0 --trainer_id=0
--save_dir=/home/jobpath/paddle-cluster-job/output I1116 09:10:17.123440 50 Util.cpp:130] Calling runInitFunctions I1116 09:10:17.123764 50 Util.cpp:143] Call runInitFunctions done. [WARNING 2016-11-16 09:10:17,227 default_decorators.py:40] please use keyword arguments in paddle config. [INFO 2016-11-16 09:10:17,239 networks.py:1282] The input order is [movie_id, title, genres, user_id, gender, age, occupation, rating] -[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__regression_cost_0__] +[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__mse_cost_0__] I1116 09:10:17.392917 50 Trainer.cpp:170] trainer mode: Normal I1116 09:10:17.613910 50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process I1116 09:10:17.680917 50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process @@ -310,3 +226,90 @@ I1116 09:10:18.019492 50 ParameterClient2.cpp:122] pserver 3 192.168.223.143: I1116 09:10:18.019716 50 ParameterClient2.cpp:122] pserver 4 192.168.129.71:7164 I1116 09:10:18.019836 50 ParameterClient2.cpp:122] pserver 5 192.168.129.71:7165 ``` + + +## 一些细节的补充 + +### 使用环境变量 + +使用容器方式运行训练任务的Kubernetes Job,通常会使用环境变量配置Job的配置信息`start_paddle.py`提供了一个启动脚本,将环境变量转换成paddle的命令行参数: +``` +API = "/api/v1/namespaces/" +JOBSELECTOR = "labelSelector=job-name=" +JOB_PATH = os.getenv("JOB_PATH") + "/" + os.getenv("JOB_NAME") +JOB_PATH_OUTPUT = JOB_PATH + "/output" +JOBNAME = os.getenv("JOB_NAME") +NAMESPACE = os.getenv("JOB_NAMESPACE") +PADDLE_NIC = os.getenv("CONF_PADDLE_NIC") +PADDLE_PORT = os.getenv("CONF_PADDLE_PORT") +PADDLE_PORTS_NUM = os.getenv("CONF_PADDLE_PORTS_NUM") +PADDLE_PORTS_NUM_SPARSE = os.getenv("CONF_PADDLE_PORTS_NUM_SPARSE") +PADDLE_SERVER_NUM = os.getenv("CONF_PADDLE_GRADIENT_NUM") +``` + +### Pod间通信 +`start_paddle.py`脚本开始时,会先进行参数的初始化与解析。 + +```python +parser = argparse.ArgumentParser(prog="start_paddle.py", + description='simple tool for k8s') + args, train_args_list = parser.parse_known_args() + train_args = refine_unknown_args(train_args_list) + train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2])) + podlist = getPodList() +``` + +然后通过函数`getPodList()`访问Kubernetes的接口来查询此job对应的所有pod信息。当所有pod都处于running状态(容器运行都运行)时,再通过函数`getIdMap(podlist)`获取trainer_id。 + +```python + podlist = getPodList() + # need to wait until all pods are running + while not isPodAllRunning(podlist): + time.sleep(10) + podlist = getPodList() + idMap = getIdMap(podlist) +``` +* *注意*: `getPodList()`会获取当前namespace下的所有pod,如果已经有pod运行,可能会导致出错。这种集群节点管理方式会在将来使用[statfulsets](https://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/)代替。 + +在函数`getIdMap(podlist)`内部,我们通过读取`podlist`中每个pod的IP地址,将IP排序生成的序号作为trainer_id。 + +```python +def getIdMap(podlist): + ''' + generate tainer_id by ip + ''' + ips = [] + for pod in podlist["items"]: + ips.append(pod["status"]["podIP"]) + ips.sort() + idMap = {} + for i in range(len(ips)): + idMap[ips[i]] = i + return idMap +``` + +在得到`idMap`后,通过函数`startPaddle(idMap, train_args_dict)`构造`paddle pserver`与`paddle train`的启动参数并执行进程。 + +### 启动任务 + +在函数`startPaddle`中,最主要的工作就是解析出`paddle pserver`与`paddle train`的启动参数。例如`paddle train`参数的解析,解析环境变量得到`PADDLE_NIC`,`PADDLE_PORT`,`PADDLE_PORTS_NUM`等参数,然后通过自身的IP地址在`idMap`中获取`trainerId`。 + +```python + program = 'paddle train' + args = " --nics=" + PADDLE_NIC + args += " --port=" + str(PADDLE_PORT) + args += " --ports_num=" + str(PADDLE_PORTS_NUM) + args += " --comment=" + "paddle_process_by_paddle" + ip_string = "" + for ip in idMap.keys(): + ip_string += (ip + ",") + ip_string = 
ip_string.rstrip(",") + args += " --pservers=" + ip_string + args_ext = "" + for key, value in train_args_dict.items(): + args_ext += (' --' + key + '=' + value) + localIP = socket.gethostbyname(socket.gethostname()) + trainerId = idMap[localIP] + args += " " + args_ext + " --trainer_id=" + \ + str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT +``` diff --git a/doc/howto/usage/k8s/src/k8s-paddle-arch.png b/doc/howto/usage/k8s/src/k8s-paddle-arch.png index a8c64550b1fa7f41de1eaa9a037c65cddc0cd30e..2183a232ad402b76f82a67234a5c93e13ce97ac3 100644 Binary files a/doc/howto/usage/k8s/src/k8s-paddle-arch.png and b/doc/howto/usage/k8s/src/k8s-paddle-arch.png differ diff --git a/doc/howto/usage/k8s/src/k8s_train/start_paddle.py b/doc/howto/usage/k8s/src/k8s_train/start_paddle.py index f1a770ccb54fbd7d4c3cf6bf134d00d7bf5961ca..935c12bb67e1fe08bc135a7a2220fcd43c548482 100755 --- a/doc/howto/usage/k8s/src/k8s_train/start_paddle.py +++ b/doc/howto/usage/k8s/src/k8s_train/start_paddle.py @@ -132,7 +132,8 @@ def startPaddle(idMap={}, train_args_dict=None): logDir = JOB_PATH_OUTPUT + "/node_" + str(trainerId) if not os.path.exists(JOB_PATH_OUTPUT): os.makedirs(JOB_PATH_OUTPUT) - os.mkdir(logDir) + if not os.path.exists(logDir): + os.mkdir(logDir) copyCommand = 'cp -rf ' + JOB_PATH + \ "/" + str(trainerId) + "/data/*" + " ./data/" os.system(copyCommand) diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 418d718fbd9c61bff3acb9c2dab0638c0b650bab..95cad835b11816f4d2e256c2abd662a545a5bad2 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -15,13 +15,19 @@ import sys import os, subprocess import shlex from recommonmark import parser, transform +try: + import py_paddle + import paddle + import paddle.v2 +except ImportError: + print("Must install paddle python package before generating documentation") + sys.exit(1) MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, '@PROJ_ROOT@/python') templates_path = ["@PROJ_ROOT@/doc_theme/templates"] # -- General configuration ------------------------------------------------ @@ -49,6 +55,7 @@ extensions = [ 'sphinx.ext.napoleon', 'sphinx.ext.graphviz' ] +mathjax_path="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js" table_styling_embed_css = True autodoc_member_order = 'bysource' diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index e96c25cb75bee20d2e2949423d80ddab1d3450a1..b477f0120c4fa0544012080b7cfb8572d3c44b04 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -15,14 +15,20 @@ import sys import os, subprocess import shlex from recommonmark import parser, transform +try: + import py_paddle + import paddle + import paddle.v2 +except ImportError: + print("Must install paddle python package before generating documentation") + sys.exit(1) + MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
-sys.path.insert(0, '@PROJ_ROOT@/python') - templates_path = ["@PROJ_ROOT@/doc_theme/templates"] # -- General configuration ------------------------------------------------ diff --git a/doc/tutorials/embedding_model/index_cn.md b/doc/tutorials/embedding_model/index_cn.md index fe800308d8d7a03619ec8e13fd8dc4aa7a8ed8be..2b4a79fbbfc0c4af74aa73c540919f5d9cf2635b 100644 --- a/doc/tutorials/embedding_model/index_cn.md +++ b/doc/tutorials/embedding_model/index_cn.md @@ -6,9 +6,10 @@ ## 介绍 ### ### 中文字典 ### -我们的字典使用内部的分词工具对百度知道和百度百科的语料进行分词后产生。分词风格如下: "《红楼梦》"将被分为 "《","红楼梦","》",和 "《红楼梦》"。字典采用UTF8编码,输出有2列:词本身和词频。字典共包含 3206325个词和3个特殊标记: +我们的字典使用内部的分词工具对百度知道和百度百科的语料进行分词后产生。分词风格如下: "《红楼梦》"将被分为 "《","红楼梦","》",和 "《红楼梦》"。字典采用UTF8编码,输出有2列:词本身和词频。字典共包含 3206326个词和4个特殊标记: - `<s>`: 分词序列的开始 - `<e>`: 分词序列的结束 + - `PALCEHOLDER_JUST_IGNORE_THE_EMBEDDING`: 占位符,没有实际意义 - `<unk>`: 未知词 ### 中文词向量的预训练模型 ### diff --git a/doc/tutorials/embedding_model/index_en.md b/doc/tutorials/embedding_model/index_en.md index d793a50f488e464bcd90a2fb506a8dcc3c760433..9525f64f9b5384c8e44690fb0887fb2293108e0a 100644 --- a/doc/tutorials/embedding_model/index_en.md +++ b/doc/tutorials/embedding_model/index_en.md @@ -6,9 +6,10 @@ We thank @lipeng for the pull request that defined the model schemas and pretrai ## Introduction ### ### Chinese Word Dictionary ### -Our Chinese-word dictionary is created on Baidu ZhiDao and Baidu Baike by using in-house word segmentor. For example, the participle of "《红楼梦》" is "《","红楼梦","》",and "《红楼梦》". Our dictionary (using UTF-8 format) has has two columns: word and its frequency. The total word count is 3206325, including 3 special token: +Our Chinese-word dictionary is created on Baidu ZhiDao and Baidu Baike by using an in-house word segmentor. For example, the participle of "《红楼梦》" is "《","红楼梦","》",and "《红楼梦》". Our dictionary (using UTF-8 format) has two columns: word and its frequency. The total word count is 3206326, including 4 special tokens: - `<s>`: the start of a sequence - `<e>`: the end of a sequence + - `PALCEHOLDER_JUST_IGNORE_THE_EMBEDDING`: a placeholder, just ignore it and its embedding - `<unk>`: a word not included in dictionary ### Pretrained Chinese Word Embedding Model ### diff --git a/doc/tutorials/quick_start/index_en.md b/doc/tutorials/quick_start/index_en.md index 70dec2eb2a8c397bc56b1e6f52a624a3a6877905..ca110431cf921ae0480d3fb2b17c58f90a84cc0e 100644 --- a/doc/tutorials/quick_start/index_en.md +++ b/doc/tutorials/quick_start/index_en.md @@ -156,14 +156,14 @@ define_py_data_sources2(train_list='data/train.list', obj="process", args={"dictionary": word_dict}) ``` -You can refer to the following link for more detailed examples and data formats: PyDataProvider2. +You can refer to the following link for more detailed examples and data formats: PyDataProvider2. ## Network Architecture We will describe four kinds of network architectures in this section.
![](./src/PipelineNetwork_en.jpg)
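Before walking through them one by one, the sketch below previews the configuration style these sections use. It is an editor's illustration only, not one of the tutorial's shipped `demo/quick_start` configs: the dictionary size and hyperparameter values are assumed.

```python
# A minimal sketch of the first architecture below (logistic regression over
# a bag of words) in the v1 trainer-config API. Values are illustrative only.
from paddle.trainer_config_helpers import *

dict_dim = 1024  # assumed; the real config derives this from word_dict

settings(batch_size=128, learning_rate=1e-3, learning_method=AdamOptimizer())

data = data_layer(name="word", size=dict_dim)   # sparse bag-of-words input
label = data_layer(name="label", size=2)        # positive / negative
# A single fully-connected layer with softmax is exactly logistic regression.
output = fc_layer(input=data, size=2, act=SoftmaxActivation())

outputs(classification_cost(input=output, label=label))
```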
First, you will build a logistic regression model. Later, you will also get a chance to build other more powerful network architectures. -For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory. +For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory. ### Logistic Regression The architecture is illustrated in the following picture: @@ -366,7 +366,7 @@ You can use single layer LSTM model with Dropout for our text classification pro
## Optimization Algorithm -Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. +Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. ```python settings(batch_size=128, @@ -407,7 +407,7 @@ paddle train \ --init_model_path=./output/pass-0000x ``` -We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. +We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. inference script (predict.sh): diff --git a/doc_theme/static/css/override.css b/doc_theme/static/css/override.css index 438a87848a0176a7857177aeb672c59f35bd8d4b..09ecff688b9a2dae3d834572217922640c529c5e 100644 --- a/doc_theme/static/css/override.css +++ b/doc_theme/static/css/override.css @@ -1,3 +1,6 @@ +* { + font-family:"Roboto","Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; +} body { padding-top: 80px; background-image: none !important; diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 503024cff338dac42a6a8a32463472dc6b6451d9..9d6d67e62c106b2298ce1ebae5633d03bba1e684 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -9,13 +9,8 @@ add_subdirectory(pserver) add_subdirectory(trainer) add_subdirectory(scripts) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in - ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) - -if(WITH_PREDICT_SDK) - add_subdirectory(predict) -endif() - if(WITH_SWIG_PY) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in + ${CMAKE_CURRENT_SOURCE_DIR}/setup.py) add_subdirectory(api) endif() diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp index 41beed38a87601cb57072c8966cd0fd2ea156524..d49b189e253f7a0792fe3f1fe7c8fdbb7071acd4 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/api/Arguments.cpp @@ -38,6 +38,13 @@ Arguments* Arguments::createByPaddleArgumentVector(void* ptr) { return args; } +Arguments* Arguments::createByPaddleArgument(const void* ptr) { + auto p = (paddle::Argument*)(ptr); + auto args = new Arguments(); + args->m->outputs.push_back(*p); + return args; +} + Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) { auto& a = m->getArg(idx); return Matrix::createByPaddleMatrixPtr(&a.value); @@ -137,9 +144,7 @@ void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) { a.cpuSequenceDims = m->cast(vec->getSharedPtr()); } -float Arguments::sumCosts() const { - return paddle::Argument::sumCosts(m->outputs); -} +float Arguments::sum() const { return paddle::Argument::sum(m->outputs); } int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) { auto& a = m->getArg(idx); diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 6e8fcd114df580a00858d95f0af0d1ec0bd9b4a2..4d0dacae9058f94e584f313c9d0e31b5af09e82d 100644 --- a/paddle/api/CMakeLists.txt +++ 
b/paddle/api/CMakeLists.txt @@ -1,21 +1,3 @@ -FUNCTION(generate_python_api target_name) - ADD_CUSTOM_COMMAND(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - COMMAND ${SWIG_EXECUTABLE} -python -c++ -outcurrentdir -I../ api/Paddle.swig - && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig - ${PROJ_ROOT}/paddle/api/PaddleAPI.h - ${external_project_dependencies} - WORKING_DIRECTORY ${PROJ_ROOT}/paddle - COMMENT "Generate Python API from swig") - ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - ${external_project_dependencies}) -ENDFUNCTION(generate_python_api) - set(API_SOURCES Arguments.cpp ConfigParser.cpp @@ -33,65 +15,84 @@ set(API_HEADER PaddleAPI.h Internal.h) -add_library(paddle_api STATIC - ${API_SOURCES}) +add_library(paddle_api STATIC ${API_SOURCES}) add_dependencies(paddle_api gen_proto_cpp) -list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH) +INCLUDE(${SWIG_USE_FILE}) +INCLUDE_DIRECTORIES(${PROJ_ROOT}/paddle) -if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}") -# Because gflags compiled by cmake, so it is imported by cmake target, -# not a real library path. Get the real library path here. -message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}") -get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION) -message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}") -else() -set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES}) -endif() +FILE(GLOB PY_PADDLE_PYTHON_FILES ${PROJ_ROOT}/paddle/py_paddle/*.py) + +SET_SOURCE_FILES_PROPERTIES(Paddle.i PROPERTIES CPLUSPLUS ON) + +SET(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR}) +SET(CMAKE_CXX_FLAGS "-std=c++11 -fPIC -Wall") +IF(WITH_COVERAGE) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage") +ENDIF(WITH_COVERAGE) -configure_file( - paddle_api_config.py.in - ${PROJ_ROOT}/paddle/api/paddle_api_config.py +SET(SWIG_MODULE_swig_paddle_EXTRA_DEPS + paddle_parameter + paddle_function + paddle_math + paddle_utils + paddle_gserver + paddle_pserver + paddle_api + paddle_cuda + paddle_trainer_lib + paddle_network + paddle_proto + ${external_project_dependencies} ) -generate_python_api(python_swig_sources) +IF(APPLE) + SET(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load") +ELSE(APPLE) + SET(START_GROUP "-Xlinker -start-group") + SET(END_GROUP "-Xlinker -end-group") + SET(ARCHIVE_START "-Wl,--whole-archive") + SET(ARCHIVE_END "-Wl,--no-whole-archive") +ENDIF(APPLE) -file(GLOB PY_PADDLE_PYTHON_FILES ${PROJ_ROOT}/paddle/py_paddle/*.py) +SWIG_ADD_MODULE(swig_paddle python Paddle.i) +SWIG_LINK_LIBRARIES(swig_paddle + ${MACOS_LD_FLAGS} + ${START_GROUP} + ${ARCHIVE_START} + paddle_gserver + paddle_function + ${METRIC_LIBS} + ${ARCHIVE_END} + paddle_pserver + paddle_trainer_lib + paddle_network + paddle_parameter + paddle_math + paddle_utils + paddle_proto + paddle_cuda + paddle_api + ${CMAKE_DL_LIBS} + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${START_END} +) -# TODO(yuyang18) : make wheel name calculated by cmake -add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp +add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so 
${PROJ_ROOT}/paddle/py_paddle COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp COMMAND rm -rf py_paddle.egg-info build WORKING_DIRECTORY ${PROJ_ROOT}/paddle - DEPENDS python_swig_sources - paddle_parameter - paddle_function - paddle_math - paddle_utils - paddle_gserver - paddle_pserver - paddle_trainer - paddle_api - paddle_cuda - ${PY_PADDLE_PYTHON_FILES} + DEPENDS _swig_paddle ) -install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ - DESTINATION opt/paddle/share/wheels -) +# TODO(yuyang18) : make wheel name calculated by cmake +add_custom_target(python_api_wheel ALL DEPENDS ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so) -add_custom_target(python_api_wheel ALL DEPENDS - ${PROJ_ROOT}/paddle/dist/.timestamp) -add_dependencies(python_api_wheel python_swig_sources - paddle_parameter - paddle_math - paddle_utils - paddle_gserver - paddle_pserver - paddle_trainer - paddle_api - paddle_cuda) +install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ DESTINATION opt/paddle/share/wheels) if(WITH_TESTING) IF(NOT PY_PIP_FOUND) diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 66115f8293b905809639afff779abfdb2bb3a54e..dcb5fe086fdccf8ec62ee52cbaaac4b7dbbe2f9d 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -142,14 +142,28 @@ Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) { } } +size_t GradientMachine::getNonStaticParameterSize() const { + return m->machine->getNonStaticParameters().size(); +} + +Parameter* GradientMachine::getNonStaticParameter(size_t i) throw(RangeError) { + auto params = m->machine->getNonStaticParameters(); + if (i < params.size()) { + return Parameter::createFromSharedPtr( + &m->machine->getNonStaticParameters()[i]); + } else { + throw RangeError(); + } +} + void GradientMachine::randParameters() { m->machine->randParameters(); } -Matrix* GradientMachine::getLayerOutput(const std::string& layerName) const +Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const throw(UnsupportError) { - auto nn = std::dynamic_pointer_cast(m->machine); + auto nn = m->machine; if (nn) { - auto mat = nn->getLayerOutput(layerName); - return Matrix::createByPaddleMatrixPtr(&mat); + auto arg = nn->getLayerOutput(layerName); + return Arguments::createByPaddleArgument(&arg); } else { throw UnsupportError(); } diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.i similarity index 100% rename from paddle/api/Paddle.swig rename to paddle/api/Paddle.i diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 80c50cdb08bb16443f7772964c49cc1fbc591f37..c4f5dca26cc6a5e9fdd23ee27b594ced29a25c7a 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -47,6 +47,9 @@ void setUseGpu(bool useGpu); /// Return true if this py_paddle is compiled in GPU Version bool isGpuVersion(); +/// Return FLAGS_trainer_count +int getTrainerCount(); + /// The Error of IO Operation. Such as file not found, etc. 
class IOError {}; @@ -450,10 +453,11 @@ public: IVector* vec) throw(RangeError); void setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError); - float sumCosts() const; + float sum() const; private: static Arguments* createByPaddleArgumentVector(void* ptr); + static Arguments* createByPaddleArgument(const void* ptr); void* getInternalArgumentsPtr() const; private: @@ -767,9 +771,12 @@ public: size_t getParameterSize() const; Parameter* getParameter(size_t i) throw(RangeError); + size_t getNonStaticParameterSize() const; + Parameter* getNonStaticParameter(size_t i) throw(RangeError); + void randParameters(); - Matrix* getLayerOutput(const std::string& layerName) const + Arguments* getLayerOutput(const std::string& layerName) const throw(UnsupportError); /** @@ -956,7 +963,7 @@ public: Arguments* getForwardOutput(); - Matrix* getLayerOutput(const std::string& layerName); + Arguments* getLayerOutput(const std::string& layerName) const; }; /// the N-Best results generated from one input sequence. diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp index d83dc380beeec3747451a483f4811eb833e8c226..84e4ca054abb0100a02c8a40e31c49c17684ef40 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/api/Trainer.cpp @@ -131,12 +131,11 @@ void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) { void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); } void Trainer::finishTestPeriod() { m->finishTestPeriod(); } -Matrix* Trainer::getLayerOutput(const std::string& layerName) { - auto nn = std::dynamic_pointer_cast( - this->m->getGradientMachine()); +Arguments* Trainer::getLayerOutput(const std::string& layerName) const { + auto nn = this->m->getGradientMachine(); CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork"; - auto m = nn->getLayerOutput(layerName); - return Matrix::createByPaddleMatrixPtr(&m); + auto arg = nn->getLayerOutput(layerName); + return Arguments::createByPaddleArgument(&arg); } void Trainer::forwardOneBatch(size_t batchSize) { diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index 54d67aa62f4d87ad03282962c722019698dc621a..d369df5d4e04b4a8d822db0e72a8051150868ce6 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -54,5 +54,7 @@ bool isGpuVersion() { #endif } +int getTrainerCount() { return FLAGS_trainer_count; } + static_assert(NUM_PARAMETER_TYPES == paddle::NUM_PARAMETER_TYPES, "The Parameter Type should be same in core/api and core/common"); diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in deleted file mode 100644 index 82f45ba6ccec49eb190d1814a67a575f311689e8..0000000000000000000000000000000000000000 --- a/paddle/api/paddle_api_config.py.in +++ /dev/null @@ -1,17 +0,0 @@ -PADDLE_BUILD_DIR="@CMAKE_CURRENT_BINARY_DIR@/../" -WITH_GPU="@WITH_GPU@" -PROTOBUF_LIBRARY="@PROTOBUF_LIBRARY@" -ZLIB_LIBRARIES="@ZLIB_LIBRARIES@" -CMAKE_THREAD_LIB="@CMAKE_THREAD_LIBS_INIT@" -CMAKE_DL_LIBS="@CMAKE_DL_LIBS@" - - -WITH_PYTHON="@WITH_PYTHON@" -PYTHON_LIBRARIES="@PYTHON_LIBRARIES@" -GLOG_LIBRARIES="@GLOG_LIBRARIES@" -GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@" -GFLAGS_LOCATION="@GFLAGS_LOCATION@" -CBLAS_LIBRARIES="@CBLAS_LIBRARIES@" - -CUDA_LIBRARIES="@CUDA_CUDART_LIBRARY@" -WITH_COVERALLS="@ON_COVERALLS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py deleted file mode 100644 index ad5dce209bf8e14120320a58c3cd85d6f6a97688..0000000000000000000000000000000000000000 --- a/paddle/api/paddle_ld_flags.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle 
Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - from paddle_api_config import * - import os.path - import platform - - system = platform.system().lower() - is_osx = (system == 'darwin') - is_win = (system == 'windows') - is_lin = (system == 'linux') - - if is_lin: - whole_start = "-Wl,--whole-archive" - whole_end = "-Wl,--no-whole-archive" - elif is_osx: - whole_start = "" - whole_end = "" - - LIB_DIRS = [ - "math", 'function', 'utils', 'parameter', "gserver", "api", "cuda", - "pserver", "trainer" - ] - PARENT_LIB_DIRS = ['proto'] - - class PaddleLDFlag(object): - def __init__(self): - self.paddle_build_dir = PADDLE_BUILD_DIR - self.paddle_build_dir = os.path.abspath(self.paddle_build_dir) - self.with_gpu = PaddleLDFlag.cmake_bool(WITH_GPU) - self.protolib = PROTOBUF_LIBRARY - self.zlib = ZLIB_LIBRARIES - self.thread = CMAKE_THREAD_LIB - self.dl_libs = CMAKE_DL_LIBS - self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON) - self.python_libs = PYTHON_LIBRARIES - - self.glog_libs = GLOG_LIBRARIES - - self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) - self.gflags_libs = GFLAGS_LIBRARIES - self.gflags_location = GFLAGS_LOCATION - self.cblas_libs = CBLAS_LIBRARIES - self.curt = CUDA_LIBRARIES - - def ldflag_str(self): - return " ".join( - [self.libs_dir_str(), self.parent_dir_str(), self.libs_str()]) - - def libs_dir_str(self): - libdirs = LIB_DIRS - return " ".join( - map(lambda x: "-L" + os.path.join(self.paddle_build_dir, x), - libdirs)) - - def parent_dir_str(self): - libdirs = PARENT_LIB_DIRS - return " ".join( - map(lambda x: "-L" + os.path.join(self.paddle_build_dir, '..', x), - libdirs)) - - def libs_str(self): - libs = [ - whole_start, - "-lpaddle_gserver", - "-lpaddle_function", - whole_end, - "-lpaddle_pserver", - "-lpaddle_trainer_lib", - "-lpaddle_network", - '-lpaddle_parameter', - "-lpaddle_math", - '-lpaddle_utils', - "-lpaddle_proto", - "-lpaddle_cuda", - "-lpaddle_api", - self.normalize_flag(self.protolib), - self.normalize_flag(self.glog_libs), - self.normalize_flag(self.gflags_libs), - self.normalize_flag(self.zlib), - self.normalize_flag(self.thread), - self.normalize_flag(self.dl_libs), - self.normalize_flag(self.cblas_libs), - ] - - if self.with_python: - libs.append(self.normalize_flag(self.python_libs)) - if self.with_gpu: - libs.append(self.normalize_flag(self.curt)) - if self.with_coverage: - libs.append("-fprofile-arcs") - return " ".join(filter(lambda l: len(l) != 0, libs)) - - def normalize_flag(self, cmake_flag): - """ - CMake flag string to ld flag - :type cmake_flag: str - """ - if ";" in cmake_flag: - return " ".join(map(self.normalize_flag, cmake_flag.split(";"))) - if cmake_flag.startswith("/"): # is a path - return cmake_flag - elif cmake_flag.startswith("-l"): # normal link command - return cmake_flag - elif cmake_flag in [ - "gflags-shared", "gflags-static", "gflags_nothreads-shared", - "gflags_nothreads-static" - ]: # special for gflags - assert PaddleLDFlag.cmake_bool(self.gflags_location) - return 
self.gflags_location - elif len(cmake_flag) != 0: - return "".join(["-l", cmake_flag]) - else: - return "" - - @staticmethod - def cmake_bool(cmake_str): - """ - CMake bool string to bool - :param cmake_str: cmake boolean string - :type cmake_str: str - :rtype: bool - """ - if cmake_str in ["FALSE", "OFF", "NO"] or cmake_str.endswith( - "-NOTFOUND"): - return False - else: - return True - - def c_flag(self): - if self.with_coverage: - return [ - "-fprofile-arcs", "-ftest-coverage", "-O0", "-g", - "-std=c++11" - ] - else: - return ["-std=c++11"] -except ImportError: - - class PaddleLDFlag(object): - def ldflag_str(self): - pass - - def c_flag(self): - pass diff --git a/paddle/api/test/testArguments.py b/paddle/api/test/testArguments.py index a04a805d7a64ef906c8388f1241b9ef823e4d9e0..9fe44de94ea6ddb71d2dfbb2243fc86ede0d0531 100644 --- a/paddle/api/test/testArguments.py +++ b/paddle/api/test/testArguments.py @@ -22,7 +22,7 @@ class TestArguments(unittest.TestCase): args = swig_paddle.Arguments.createArguments(1) args.setSlotValue(0, m) - self.assertAlmostEqual(27.0, args.sumCosts()) + self.assertAlmostEqual(27.0, args.sum()) mat = args.getSlotValue(0) assert isinstance(mat, swig_paddle.Matrix) diff --git a/paddle/cuda/include/hl_cpu_matrix_kernel.cuh b/paddle/cuda/include/hl_cpu_matrix_kernel.cuh index f35bfbc5c8253d632f8089f5037421f527633aad..9c49a4bd2083794e98b099b25944bedec3d5a2ff 100644 --- a/paddle/cuda/include/hl_cpu_matrix_kernel.cuh +++ b/paddle/cuda/include/hl_cpu_matrix_kernel.cuh @@ -17,7 +17,11 @@ limitations under the License. */ #include #include "hl_base.h" +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +#include "hl_neon_matrix_kernel.cuh" +#else #include "hl_sse_matrix_kernel.cuh" +#endif /** * @brief cpu element wise unary operator. diff --git a/paddle/cuda/include/hl_matrix_base.cuh b/paddle/cuda/include/hl_matrix_base.cuh index db35ee2037433163ebb3673edb350e3fab71fba9..8b755c1095c2c4fdb7e74d8cddc948e6a6af380b 100644 --- a/paddle/cuda/include/hl_matrix_base.cuh +++ b/paddle/cuda/include/hl_matrix_base.cuh @@ -66,6 +66,8 @@ typedef BaseOp SSESquaredDiff; typedef BaseOp SSEFirst; typedef BaseOp SSESecond; typedef BaseOp SSEClassificationError; +#elif defined(__ARM__NEON__) || defined(__ARM_NEON) +#include "hl_matrix_base_neon.cuh" #else #include "hl_matrix_base_sse.cuh" #endif diff --git a/paddle/cuda/include/hl_matrix_base_neon.cuh b/paddle/cuda/include/hl_matrix_base_neon.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e13019f5ee24ad600005c99678426ee3808b0e54 --- /dev/null +++ b/paddle/cuda/include/hl_matrix_base_neon.cuh @@ -0,0 +1,161 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + + +#ifndef HL_MATRIX_BASE_NEON_CUH_ +#define HL_MATRIX_BASE_NEON_CUH_ + +namespace aggregate { +class SSESum { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vaddq_f32(a, b); + } +}; + +class SSEMax { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vmaxq_f32(a, b); + } +}; + +class SSEMin { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vminq_f32(a, b); + } +}; +} // namespace aggregate + +namespace base { +namespace unary { +class SSEIdentity { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a) const { + return a; + } +}; +} // namespace unary + +namespace binary { +class SSEAdd { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vaddq_f32(a, b); + } +}; + +class SSEAdd2 { +public: + static const bool sse = true; + const real p1; + const real p2; + float32x4_t mp1; + float32x4_t mp2; + +public: + SSEAdd2(const real s1, const real s2) : p1(s1), p2(s2) { + mp1 = vdupq_n_f32(p1); + mp2 = vdupq_n_f32(p2); + } + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + float32x4_t tmp1, tmp2; + tmp1 = vmulq_f32(mp1, a); + tmp2 = vmulq_f32(mp2, b); + return vaddq_f32(tmp1, tmp2); + } +}; + +class SSESub { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vsubq_f32(a, b); + } +}; + +class SSEMul { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return vmulq_f32(a, b); + } +}; + +class SSEDiv { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + float32x4_t tmp; + tmp = vrecpeq_f32(b); + return vmulq_f32(a, tmp); + } +}; + +class SSESquaredDiff { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + float32x4_t tmp; + tmp = vsubq_f32(a, b); + return vmulq_f32(tmp, tmp); + } +}; + +class SSEFirst { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return a; + } +}; + +class SSESecond { +public: + static const bool sse = true; + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + return b; + } +}; + +class SSEClassificationError { +public: + static const bool sse = true; + const real p; + float32x4_t mp; + uint32x4_t result; + +public: + explicit SSEClassificationError(const real s) : p(s) { + mp = vdupq_n_f32(p); + result = vdupq_n_u32(1); + } + // TODO: to be check + INLINE float32x4_t vecOp(const float32x4_t a, const float32x4_t b) const { + uint32x4_t tmp1 = vcgtq_f32(a, mp); + uint32x4_t tmp2 = vcgtq_f32(b, mp); + uint32x4_t tmp3 = veorq_u32(tmp1, tmp2); + return vcvtq_f32_u32(vandq_u32(tmp3, result)); + } +}; +} // namespace binary +} // namespace base + +#endif /* HL_MATRIX_BASE_NEON_CUH_ */ diff --git a/paddle/cuda/include/hl_matrix_type.cuh b/paddle/cuda/include/hl_matrix_type.cuh index 59213eee75f50d3c054ed8684a9a0e1053342a0a..f965ba966793f6f6eea0ad3606f60553fe904dda 100644 --- a/paddle/cuda/include/hl_matrix_type.cuh +++ b/paddle/cuda/include/hl_matrix_type.cuh @@ -17,13 +17,20 @@ limitations under the License. 
*/ #include "hl_base.h" -#ifdef __CUDA_ARCH__ +#if defined(__CUDA_ARCH__) #include #ifndef PADDLE_TYPE_DOUBLE typedef float4 vecType; #else typedef double2 vecType; #endif +#elif (defined __ARM_NEON) || (defined __ARM_NEON__) +#include +#ifndef PADDLE_TYPE_DOUBLE +typedef float32x4_t vecType; +#else +#error NEON instructions does not support double precision +#endif #else #include #include diff --git a/paddle/cuda/include/hl_neon_matrix_kernel.cuh b/paddle/cuda/include/hl_neon_matrix_kernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7b4e5b00079b66d0a46a1344a43f41962cf50f10 --- /dev/null +++ b/paddle/cuda/include/hl_neon_matrix_kernel.cuh @@ -0,0 +1,299 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#ifndef HL_NEON_MATRIX_KERNEL_CUH_ +#define HL_NEON_MATRIX_KERNEL_CUH_ + +#include "hl_matrix_type.cuh" + +#define VECTOR_SIZE 16 + +/* number of float in vector */ +#define VECTOR_LEN 4 +#define VECTOR_SET vdupq_n_f32 + +inline bool hl_check_align(size_t size) { + return !(size & (VECTOR_SIZE - 1)); +} + +inline bool hl_check_align(void *ptr) { + return hl_check_align(reinterpret_cast(ptr)); +} + +template +inline real hl_agg_op(Agg agg, vecType mm) { + float32x4_t rev = vrev64q_f32(mm); + float32x4_t tmp1 = agg.vecOp(rev, rev); + float32x2_t lo = vget_high_f32(rev); + float32x2_t hi = vget_low_f32(rev); + float32x4_t tmp2 = vcombine_f32(hi, lo); + float32x4_t ret = agg.vecOp(tmp1, tmp2); + + return vgetq_lane_f32(ret, 0); +} + +template +void hl_sse_matrix_row_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, int ld, + real *A, int lda) { + for (int i = 0; i < dimM; i++, A += lda) { + vecType mm = VECTOR_SET(agg.init()); + vecType *a = (vecType*)(A); + for (int j = 0; j < dimN / VECTOR_LEN; j++, a++) { + mm = agg.vecOp(mm, op.vecOp(*a)); + } + + int rem = dimN % VECTOR_LEN; + if (rem) { + real tmp = hl_agg_op(agg, mm); + real *a = A + (dimN / VECTOR_LEN) * VECTOR_LEN; + for (int j = 0; j < rem; j++) { + tmp = agg(tmp, op(a[j])); + } + dst[i*ld] = sv(dst[i*ld], tmp); + } else { + dst[i*ld] = sv(dst[i*ld], hl_agg_op(agg, mm)); + } + } +} + +template +void hl_sse_matrix_row_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, int ld, + real *A, int lda, + real *B, int ldb) { + for (int i = 0; i < dimM; i++, A += lda, B += ldb) { + vecType mm = VECTOR_SET(agg.init()); + vecType *a = (vecType*)(A); + vecType *b = (vecType*)(B); + for (int j = 0; j < dimN / VECTOR_LEN; j++, a++, b++) { + mm = agg.vecOp(mm, op.vecOp(*a, *b)); + } + + int rem = dimN % VECTOR_LEN; + if (rem) { + real tmp = hl_agg_op(agg, mm); + real *a = A + (dimN / VECTOR_LEN) * VECTOR_LEN; + real *b = B + (dimN / VECTOR_LEN) * VECTOR_LEN; + for (int j = 0; j < rem; j++) { + tmp = agg(tmp, op(a[j], b[j])); + } + dst[i*ld] = sv(dst[i*ld], tmp); + } else { + dst[i*ld] = sv(dst[i*ld], hl_agg_op(agg, mm)); + } + } +} + +template +void hl_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int 
lda) { + for (int j = 0; j < dimN; j++) { + real tmp = agg.init(); + for (int i = 0; i < dimM; i++) { + tmp = agg(tmp, op(A[i * lda + j])); + } + dst[j] = sv(dst[j], tmp); + } +} + +template +void hl_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda, + real *B, int ldb) { + for (int j = 0; j < dimN; j++) { + real tmp = agg.init(); + for (int i = 0; i < dimM; i++) { + tmp = agg(tmp, op(A[i * lda + j], B[i * ldb + j])); + } + dst[j] = sv(dst[j], tmp); + } +} + +/* + * MaxRow greater than or equal dimN + * dimN is multiples of VECTOR_LEN + * so rem <= MaxRow / VECTOR_LEN + */ +template +void hl_sse_column_op_with_rem(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda) { + vecType mm[MaxRow / VECTOR_LEN]; + for (int n = 0; n < MaxRow / VECTOR_LEN; n++) { + mm[n] = VECTOR_SET(agg.init()); + } + + for (int i = 0; i < dimM; i++) { + vecType *a = (vecType*)(A + i * lda); + for (int n = 0; n < dimN / VECTOR_LEN; n++) { + mm[n] = agg.vecOp(mm[n], op.vecOp(a[n])); + } + } + + vecType *result = (vecType*)(dst); + for (int n = 0; n < dimN / VECTOR_LEN; n++) { + result[n] = sv.vecOp(result[n], mm[n]); + } + + int rem = dimN % VECTOR_LEN; + if (rem) { + A += (dimN / VECTOR_LEN) * VECTOR_LEN; + dst += (dimN / VECTOR_LEN) * VECTOR_LEN; + hl_matrix_column_op(agg, op, sv, dimM, rem, dst, A, lda); + } +} + +/* + * dimN is multiples of VECTOR_LEN + * dimN greater than Step + */ +template +void hl_sse_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda) { + for (int j = 0; j < dimN / Step; j++, dst += Step, A += Step) { + vecType mm[Step / VECTOR_LEN]; + for (int n = 0; n < Step / VECTOR_LEN; n++) { + mm[n] = VECTOR_SET(agg.init()); + } + + for (int i = 0; i < dimM; i++) { + vecType *a = (vecType*)(A + i * lda); + for (int n = 0; n < Step / VECTOR_LEN; n++) { + mm[n] = agg.vecOp(mm[n], op.vecOp(a[n])); + } + } + + vecType *result = (vecType*)(dst); + for (int n = 0; n < Step / VECTOR_LEN; n++) { + result[n] = sv.vecOp(result[n], mm[n]); + } + } + + int remRow = dimN % Step; + if (remRow) { + hl_sse_column_op_with_rem(agg, op, sv, dimM, remRow, dst, A, lda); + } +} + +template +void hl_sse_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda) { + if (dimN <= 16) { + hl_sse_matrix_column_op<16>(agg, op, sv, dimM, dimN, dst, A, lda); + } else if (dimN <= 32) { + hl_sse_matrix_column_op<32>(agg, op, sv, dimM, dimN, dst, A, lda); + } else if (dimN <= 1024 || dimM <= 512) { + hl_sse_matrix_column_op<64>(agg, op, sv, dimM, dimN, dst, A, lda); + } else { + hl_sse_matrix_column_op<1024>(agg, op, sv, dimM, dimN, dst, A, lda); + } +} + +template +void hl_sse_column_op_with_rem(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda, + real *B, int ldb) { + vecType mm[MaxRow / VECTOR_LEN]; + for (int n = 0; n < MaxRow / VECTOR_LEN; n++) { + mm[n] = VECTOR_SET(agg.init()); + } + + for (int i = 0; i < dimM; i++) { + vecType *a = (vecType*)(A + i * lda); + vecType *b = (vecType*)(B + i * ldb); + for (int n = 0; n < dimN / VECTOR_LEN; n++) { + mm[n] = agg.vecOp(mm[n], op.vecOp(a[n], b[n])); + } + } + + vecType *result = (vecType*)(dst); + for (int n = 0; n < dimN / VECTOR_LEN; n++) { + result[n] = sv.vecOp(result[n], mm[n]); + } + + int rem = dimN % VECTOR_LEN; + if (rem) { + A += (dimN / VECTOR_LEN) * VECTOR_LEN; + B += (dimN / VECTOR_LEN) * VECTOR_LEN; + dst += (dimN / VECTOR_LEN) * VECTOR_LEN; + hl_matrix_column_op(agg, op, sv, dimM, rem, 
dst, A, lda, B, ldb); + } +} + +template +void hl_sse_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda, + real *B, int ldb) { + for (int j = 0; j < dimN / Step; j++, dst += Step, A += Step, B += Step) { + vecType mm[Step / VECTOR_LEN]; + for (int n = 0; n < Step / VECTOR_LEN; n++) { + mm[n] = VECTOR_SET(agg.init()); + } + + for (int i = 0; i < dimM; i++) { + vecType *a = (vecType*)(A + i * lda); + vecType *b = (vecType*)(B + i * ldb); + for (int n = 0; n < Step / VECTOR_LEN; n++) { + mm[n] = agg.vecOp(mm[n], op.vecOp(a[n], b[n])); + } + } + + vecType *result = (vecType*)(dst); + for (int n = 0; n < Step / VECTOR_LEN; n++) { + result[n] = sv.vecOp(result[n], mm[n]); + } + } + + int remRow = dimN % Step; + if (remRow) { + hl_sse_column_op_with_rem( + agg, op, sv, dimM, remRow, dst, A, lda, B, ldb); + } +} + +template +void hl_sse_matrix_column_op(Agg agg, Op op, Saver sv, + int dimM, int dimN, + real *dst, + real *A, int lda, + real *B, int ldb) { + if (dimN <= 16) { + hl_sse_matrix_column_op<16>(agg, op, sv, dimM, dimN, dst, A, lda, B, ldb); + } else if (dimN <= 32) { + hl_sse_matrix_column_op<32>(agg, op, sv, dimM, dimN, dst, A, lda, B, ldb); + } else if (dimN <= 1024 || dimM <= 512) { + hl_sse_matrix_column_op<64>(agg, op, sv, dimM, dimN, dst, A, lda, B, ldb); + } else { + hl_sse_matrix_column_op<1024>(agg, op, sv, dimM, dimN, dst, A, lda, B, ldb); + } +} + +#endif /* HL_NEON_MATRIX_KERNEL_CUH_ */ diff --git a/paddle/cuda/include/hl_sequence.h b/paddle/cuda/include/hl_sequence.h index 9f9d8f972e3a4c62e5caedcf85054be5681b96c1..973ddcceed99ba4177b3db277e664611d42ac51b 100644 --- a/paddle/cuda/include/hl_sequence.h +++ b/paddle/cuda/include/hl_sequence.h @@ -159,4 +159,10 @@ extern void hl_sequence_avg_forward(real* dst, int width, const int mode); +extern void hl_sequence_avg_backward(real* dst, + real* src, + const int* starts, + int height, + int width, + const int mode); #endif /* HL_SEQUENCE_H_ */ diff --git a/paddle/cuda/include/stub/hl_sequence_stub.h b/paddle/cuda/include/stub/hl_sequence_stub.h index 05e51bce9e1df6fc6ef1cad891b44a9172da185d..920b417b1c717efaff75f70f1b9d2b574469e425 100644 --- a/paddle/cuda/include/stub/hl_sequence_stub.h +++ b/paddle/cuda/include/stub/hl_sequence_stub.h @@ -57,4 +57,10 @@ inline void hl_sequence_avg_forward(real* dst, int width, const int mode) {} +inline void hl_sequence_avg_backward(real* dst, + real* src, + const int* starts, + int height, + int width, + const int mode) {} #endif // HL_SEQUENCE_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_sequence.cu b/paddle/cuda/src/hl_cuda_sequence.cu index ba823de2720336851bf9c49d8162360af93e8601..0fe2877f89f8d0fbc4db40c400037be30bb87ff7 100644 --- a/paddle/cuda/src/hl_cuda_sequence.cu +++ b/paddle/cuda/src/hl_cuda_sequence.cu @@ -325,12 +325,12 @@ __global__ void KeSequenceAvgForward(real* dst, int seqLength = end - start; if (seqLength == 0) return; real sum = 0.0; - for (int i = 0; i < seqLength; i++) { - sum += src[(start + i) * width + col]; + for (int i = start; i < end; i++) { + sum += src[i * width + col]; } sum = mode == 1 ? sum : (mode == 0 ? 
sum / seqLength : sum * my_rsqrt((real)seqLength)); - dst[row * width + col] = sum; + dst[gid] = sum; } } @@ -354,3 +354,48 @@ void hl_sequence_avg_forward(real* dst, (dst, src, starts, height, width, mode); CHECK_SYNC("hl_sequence_avg_forward failed"); } + +__global__ void KeSequenceAvgBackward(real* dst, + real* src, + const int* starts, + int height, + int width, + const int mode) { + int gid = blockIdx.x * blockDim.x + threadIdx.x; + int row = gid / width; + int col = gid % width; + + if (gid < height * width) { + int start = starts[row]; + int end = starts[row + 1]; + int seqLength = end - start; + if (seqLength == 0) return; + real grad = src[gid]; + grad = mode == 1 ? grad : + (mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength)); + for (int i = start; i < end; i++) { + dst[i * width + col] += grad; + } + } +} + +void hl_sequence_avg_backward(real* dst, + real* src, + const int* starts, + int height, + int width, + const int mode) { + CHECK_NOTNULL(dst); + CHECK_NOTNULL(src); + CHECK_NOTNULL(starts); + + int block = 512; + int grid = DIVUP(width * height, 512); + + CHECK(mode == 0 || mode == 1 || mode == 2) + << "mode error in hl_sequence_avg_backward!"; + + KeSequenceAvgBackward<<< grid, block, 0, STREAM_DEFAULT >>> + (dst, src, starts, height, width, mode); + CHECK_SYNC("hl_sequence_avg_backward failed"); +} diff --git a/paddle/function/CosSimOpGpu.cu b/paddle/function/CosSimOpGpu.cu index 1dd733674fa0542c76070955ec63e008b083c7d2..c62ab39551f02288618244871ae31c6800df5b42 100644 --- a/paddle/function/CosSimOpGpu.cu +++ b/paddle/function/CosSimOpGpu.cu @@ -92,7 +92,6 @@ void CosSimForward(GpuMatrix& out_mat, CHECK(in1_mat.useGpu_ == true && in2_mat.useGpu_ == true) << "Matrix type are not GPU"; - size_t num_samples = out_mat.getHeight(); size_t dim = in1_mat.getWidth(); real* out = out_mat.getData(); const real* x = in1_mat.getData(); diff --git a/paddle/function/Function.cpp b/paddle/function/Function.cpp index f47d55a4ade97d76e0f1940a2234e34e20efade6..f71c0f681b3bc524ba96c55f1dcad30ef59478c8 100644 --- a/paddle/function/Function.cpp +++ b/paddle/function/Function.cpp @@ -16,66 +16,6 @@ limitations under the License. 
*/ namespace paddle { -template <> -size_t FuncConfig::get(const std::string& key) const { - auto it = valueMap_.find(key); - CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'"; - return it->second.s; -} - -template <> -real FuncConfig::get(const std::string& key) const { - auto it = valueMap_.find(key); - CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'"; - return it->second.r; -} - -template <> -int FuncConfig::get(const std::string& key) const { - auto it = valueMap_.find(key); - CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'"; - return it->second.i; -} - -template <> -bool FuncConfig::get(const std::string& key) const { - auto it = valueMap_.find(key); - CHECK(it != valueMap_.end()) << "Cannot find value: '" << key << "'"; - return it->second.b; -} - -template <> -FuncConfig& FuncConfig::set(const std::string& key, size_t v) { - CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " - << key; - valueMap_[key].s = v; - return *this; -} - -template <> -FuncConfig& FuncConfig::set(const std::string& key, real v) { - CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " - << key; - valueMap_[key].r = v; - return *this; -} - -template <> -FuncConfig& FuncConfig::set(const std::string& key, int v) { - CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " - << key; - valueMap_[key].i = v; - return *this; -} - -template <> -FuncConfig& FuncConfig::set(const std::string& key, bool v) { - CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " - << key; - valueMap_[key].b = v; - return *this; -} - void BufferArgs::addArg(const Matrix& arg, const TensorShape& shape, ArgType argType) { diff --git a/paddle/function/Function.h b/paddle/function/Function.h index 3bbeb6e525f85bdde9a54c8d60146eaa30a1bb4d..15eb35b7f7dac1b98f2d8694707d83b84bda0f2e 100644 --- a/paddle/function/Function.h +++ b/paddle/function/Function.h @@ -18,32 +18,49 @@ limitations under the License. */ #include #include "BufferArg.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Any.h" #include "paddle/utils/ClassRegistrar.h" +#include "paddle/utils/Error.h" namespace paddle { /** * Function Configuration. * The argument type of Function::init. - * Follow-up will consider moving this data structure to Proto inside. */ class FuncConfig { public: - union value { - size_t s; - real r; - int i; - bool b; - }; - template - T get(const std::string& key) const; + T get(const std::string& key, Error* err = nullptr) const { + try { + return any_cast(valueMap_.at(key)); + } catch (std::exception& e) { // could be cast or out of range exception. + if (err) { + *err = Error(e.what()); + } else { + LOG(FATAL) << "Cannot get key " << key << " with error " << e.what(); + } + return T(); + } + } template - FuncConfig& set(const std::string& key, T v); + FuncConfig& set(const std::string& key, T v, Error* err = nullptr) { + auto it = valueMap_.find(key); + if (it != valueMap_.end()) { // already contains key. 
+ if (err) { + *err = Error("Key %s is already set in FuncConfig", key.c_str()); + } else { + LOG(FATAL) << "Key " << key << " is already set in FuncConfig."; + } + return *this; + } + valueMap_[key] = any(v); + return *this; + } protected: - std::map<std::string, value> valueMap_; + mutable std::unordered_map<std::string, any> valueMap_; }; /** diff --git a/paddle/function/PadOp.cpp b/paddle/function/PadOp.cpp index f1a0d2a1a96f24ddff8cd120681a8bc8cddaf40a..adba7c92ece505eecc74edce6b393cf27fa10ccc 100644 --- a/paddle/function/PadOp.cpp +++ b/paddle/function/PadOp.cpp @@ -25,9 +25,9 @@ void Pad(real* outputs, const int inH, const int inW, const PadConf& pad) { - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; @@ -51,9 +51,9 @@ void PadGrad(real* inGrad, const int inH, const int inW, const PadConf& pad) { - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; @@ -71,6 +71,12 @@ void PadGrad(real* inGrad, } } +static inline PadConf castToPadConf(const FuncConfig& conf) { + return {conf.get<std::vector<uint32_t>>("channel"), + conf.get<std::vector<uint32_t>>("height"), + conf.get<std::vector<uint32_t>>("width")}; +} + /** * \brief Padding zeros to input according to the specify dimension. * The struct pad_ contains the padding size in each dimension. @@ -127,14 +133,7 @@ void PadGrad(real* inGrad, template <DeviceType Device> class PadFunc : public FunctionBase { public: - void init(const FuncConfig& config) override { - pad_.channelStart = config.get<int>("cstart"); - pad_.channelEnd = config.get<int>("cend"); - pad_.heightStart = config.get<int>("hstart"); - pad_.heightEnd = config.get<int>("hend"); - pad_.widthStart = config.get<int>("wstart"); - pad_.widthEnd = config.get<int>("wend"); - } + void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { CHECK_EQ(1UL, inputs.size()); @@ -175,14 +174,7 @@ private: template <DeviceType Device> class PadGradFunc : public FunctionBase { public: - void init(const FuncConfig& config) override { - pad_.channelStart = config.get<int>("cstart"); - pad_.channelEnd = config.get<int>("cend"); - pad_.heightStart = config.get<int>("hstart"); - pad_.heightEnd = config.get<int>("hend"); - pad_.widthStart = config.get<int>("wstart"); - pad_.widthEnd = config.get<int>("wend"); - } + void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { CHECK_EQ(1UL, inputs.size());
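The any-based FuncConfig above is what lets castToPadConf() pull whole vectors out of a config with one templated get. A minimal usage sketch (a hypothetical call site; it assumes paddle::any and any_cast behave like their std counterparts, as Function.h suggests):

    #include <cstdint>
    #include <vector>
    // FuncConfig and Error as declared in paddle/function/Function.h above.

    void padConfigExample() {
      paddle::FuncConfig config;
      config.set<std::vector<uint32_t>>("channel", {2, 3});  // {before, after}
      config.set<std::vector<uint32_t>>("height", {1, 2});
      config.set<std::vector<uint32_t>>("width", {3, 2});

      paddle::Error err;
      auto channel = config.get<std::vector<uint32_t>>("channel", &err);  // ok
      int wrong = config.get<int>("channel", &err);  // bad any_cast: err is
                                                     // set and int() is returned
    }

Note that setting the same key twice now reports an error instead of silently overwriting the value.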
diff --git a/paddle/function/PadOp.h b/paddle/function/PadOp.h index 7b5c730a6a0fa57833e63beba085cb17054ae2f5..0e226ec7370b9897ebdc697ee528b90a37e4ec56 100644 --- a/paddle/function/PadOp.h +++ b/paddle/function/PadOp.h @@ -19,18 +19,12 @@ limitations under the License. */ namespace paddle { struct PadConf { - /// how many values to add before the data along channel dimension. - int channelStart; - /// how many values to add after the data along channel dimension. - int channelEnd; - /// how many values to add before the data along height dimension. - int heightStart; - /// how many values to add after the data along height dimension. - int heightEnd; - /// how many values to add before the data along width dimension. - int widthStart; - /// how many values to add after the data along width dimension. - int widthEnd; + /// how many values to add before/after the data along channel dimension. + std::vector<uint32_t> channel; + /// how many values to add before/after the data along height dimension. + std::vector<uint32_t> height; + /// how many values to add before/after the data along width dimension. + std::vector<uint32_t> width; }; /** diff --git a/paddle/function/PadOpGpu.cu b/paddle/function/PadOpGpu.cu index 9104b1aca507c526858c2117e0a5db59f535091e..9094f1528433fdcaad3397a991aa8ac6fa04bc01 100644 --- a/paddle/function/PadOpGpu.cu +++ b/paddle/function/PadOpGpu.cu @@ -44,9 +44,9 @@ void Pad(real* outputs, size_t nth = num * inC * inH * inW; int blockSize = 1024; int gridSize = (nth + 1024 - 1) / 1024; - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; @@ -83,9 +83,9 @@ void PadGrad(real* inGrad, int nth = num * inC * inH * inW; int blockSize = 1024; int gridSize = (nth + 1024 - 1) / 1024; - int cstart = pad.channelStart, cend = pad.channelEnd; - int hstart = pad.heightStart, hend = pad.heightEnd; - int wstart = pad.widthStart, wend = pad.widthEnd; + int cstart = pad.channel[0], cend = pad.channel[1]; + int hstart = pad.height[0], hend = pad.height[1]; + int wstart = pad.width[0], wend = pad.width[1]; int outC = inC + cstart + cend; int outH = inH + hstart + hend; int outW = inW + wstart + wend; diff --git a/paddle/function/PadOpTest.cpp b/paddle/function/PadOpTest.cpp index cd22d9113567912f7694e05e5d631e49d940e3ac..f77ac2a8c49c83f2d6c64c2a30b6a2f2eb09ac10 100644 --- a/paddle/function/PadOpTest.cpp +++ b/paddle/function/PadOpTest.cpp @@ -24,48 +24,22 @@ TEST(Pad, real) { for (size_t imgSizeW : {5, 32, 96}) { VLOG(3) << " numSamples=" << numSamples << " channels=" << channels << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; - - FunctionCompare compare("Pad", - FuncConfig() - .set("cstart", 2) - .set("cend", 3) - .set("hstart", 1) - .set("hend", 2) - .set("wstart", 3) - .set("wend", 2)); - TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; - TensorShape outDims{ - numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; - compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, inDims)); - compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, outDims, ASSIGN_TO)); - compare.run(); - } - } - } - } -} - -TEST(PadGrad, real) { - for (size_t numSamples : {5, 32}) { - for (size_t channels : {1, 5, 32}) { - for (size_t imgSizeH : {5, 33, 100}) { - for (size_t imgSizeW : {5, 32, 96}) { - VLOG(3) << " numSamples=" << numSamples << " channels=" << channels - << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; - FunctionCompare compare("PadGrad", - FuncConfig() - .set("cstart", 2) - .set("cend", 3) - .set("hstart", 1) - .set("hend", 2) - .set("wstart", 3) - .set("wend", 2)); - TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; - TensorShape outDims{ - numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; -
compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, outDims)); - compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, inDims, ASSIGN_TO)); - compare.run(); + for (bool test_grad : {false, true}) { + FunctionCompare compare( + test_grad ? "PadGrad" : "Pad", + FuncConfig() + .set<std::vector<uint32_t>>("channel", {2, 3}) + .set<std::vector<uint32_t>>("height", {1, 2}) + .set<std::vector<uint32_t>>("width", {3, 2})); + TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; + TensorShape outDims{ + numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5}; + compare.addInputs( + BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims)); + compare.addOutputs(BufferArg( + VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO)); + compare.run(); + } } } } diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index 4f92150ec84d637c5b75cba09d7e98501a5a5f5d..93a6a99848aa13bb36c9c5c7091fbaa891fc9823 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -25,12 +25,16 @@ filter_test(GSERVER_HEADER) filter_test(GSERVER_SOURCES) if(NOT WITH_GPU) list(REMOVE_ITEM GSERVER_HEADER + layers/CudnnConvBaseLayer.h layers/CudnnConvLayer.h + layers/CudnnConvTransLayer.h layers/CudnnPoolLayer.h layers/CudnnBatchNormLayer.h) list(REMOVE_ITEM GSERVER_SOURCES + layers/CudnnConvBaseLayer.cpp layers/CudnnConvLayer.cpp + layers/CudnnConvTransLayer.cpp layers/CudnnPoolLayer.cpp layers/CudnnBatchNormLayer.cpp) compile_cu_as_cpp(layers/LstmCompute.cu) diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index 9a2ad7567f0dc93d0a8e396fd88b2488afe9d049..40036762179ebb1495b90907f16b97e3c60c50d8 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -164,15 +164,6 @@ public: argu.value = value; data_.push_back(argu); } - /** - * @brief Append user defined data - * @param[in] ptr user defined data - */ - void appendUserDefinedPtr(UserDefinedVectorPtr ptr) { - Argument argu; - argu.udp = ptr; - data_.push_back(argu); - } /* * @brief Append argument diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h index 0829968d87c5dc7eeb2d1b70c758ff305d89496f..f9c82a2bef82b4e6bcbf0c73583505d2692f3926 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.h +++ b/paddle/gserver/gradientmachines/GradientMachine.h @@ -134,6 +134,8 @@ public: backward(callback); } + virtual Argument getLayerOutput(const std::string& layerName) = 0; + // see comment in Layer.h for the function with the same name virtual void resetState() {} diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp index 80f223824d8dccfb0e9386f4c076b28f9332a958..6ae60102b3e431727c0954e8b8073bfe0534f8ee 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp @@ -24,9 +24,6 @@ limitations under the License. */ DEFINE_bool(allow_only_one_model_on_one_gpu, true, "If true, do not allow multiple models on one GPU device"); -#ifdef PADDLE_METRIC_LEARNING -DECLARE_bool(external); -#endif namespace paddle { @@ -45,11 +42,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config, trainerBarrier_(FLAGS_trainer_count), allBarrier_(FLAGS_trainer_count + 1), inArgsCopied_(false) { -#ifdef PADDLE_METRIC_LEARNING - isPassGrad_ = FLAGS_external; -#else isPassGrad_ = false; -#endif numThreads_ = FLAGS_trainer_count; if (useGpu) { //!
TODO(yuyang18): When useGpu=false && paddle is not compiled with gpu, @@ -282,6 +275,18 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs, backwardImp(callback); } +Argument MultiGradientMachine::getLayerOutput(const std::string& layerName) { + std::vector<Argument> args; + args.reserve(threads_.size()); + + for (auto& thread : threads_) { + args.push_back(thread->getGradientMachine()->getLayerOutput(layerName)); + } + outLayerArgs_.concat(args, false /* use_gpu */, outArgStream_, passType_); + + return outLayerArgs_; +} + void MultiGradientMachine::backwardImp(const UpdateCallback& callback) { for (size_t i = 0; i < parameters_.size(); i++) { if (!parameters_[i]->useGpu() || parameters_[i]->isStatic()) continue; @@ -334,7 +339,9 @@ Evaluator* MultiGradientMachine::makeEvaluator() const { void MultiGradientMachine::eval(Evaluator* evaluator) const { for (auto& thread : threads_) { SetDevice device(thread->getDeviceId()); - thread->getGradientMachine()->eval(evaluator); + if (thread->hasInputData()) { + thread->getGradientMachine()->eval(evaluator); + } } } @@ -344,14 +351,19 @@ void MultiGradientMachine::getOutArgs(std::vector<Argument>* outArgs, REGISTER_TIMER("waitOutArgs"); thread->waitOutArgsReady(); } - outArgs_.resize(threads_[0]->getOutArgs().size()); + + outArgs_.resize(threads_[threads_.size() - 1]->getOutArgs().size()); REGISTER_TIMER("copyOutArgs"); for (size_t i = 0; i < outArgs_.size(); ++i) { std::vector<Argument> args; args.reserve(threads_.size()); for (auto& thread : threads_) { - args.push_back(thread->getOutArgs()[i]); + // If the thread input is empty, then the output is empty. + auto tmp = thread->getOutArgs(); + if (tmp.size() > 0) { + args.push_back(tmp[i]); + } } outArgs_[i].concat(args, useGpu_, outArgStream_, passType); } @@ -522,7 +534,7 @@ void TrainerThread::prefetch() { void TrainerThread::forward() { if (!inArgsCopied_) { REGISTER_TIMER("copyInArgs"); - copyInArgs(); + batchSize_ = copyInArgs(); } else { inArgsCopied_ = false; } @@ -552,7 +564,12 @@ void TrainerThread::forward() { { REGISTER_TIMER("thread_forward"); - gradientMachine_->forward(inArgs_, &outArgs_, multiMachine_->getPassType()); + if (batchSize_ > 0) { + gradientMachine_->forward( + inArgs_, &outArgs_, multiMachine_->getPassType()); + } else { + outArgs_.clear(); + } } outArgsReadySem_.post(); } @@ -562,7 +579,13 @@ void TrainerThread::backward() { if (multiMachine_->isPassGrad()) { copyOutputGrad(); } - gradientMachine_->backward(backwardCallback_); + if (batchSize_ > 0) { + gradientMachine_->backward(backwardCallback_); + } else { + for (size_t i = parameters_.size(); i > 0; i--) { + backwardCallback(parameters_[i - 1].get()); + } + } if (multiMachine_->hasNonstaticCpuParamters()) { mergeCpuGradients(); } @@ -720,7 +743,7 @@ void TrainerThread::notifyValueReady(int paramId) { notifyValueDispatch(paramId); } -void TrainerThread::copyInArgs() { +int TrainerThread::copyInArgs() { const std::vector<Argument>& fullInArgs = multiMachine_->getInArgs(); int numThreads = multiMachine_->getAllThreads().size(); int32_t numSequences = fullInArgs[0].getNumSequences(); @@ -736,7 +759,7 @@ void TrainerThread::copyInArgs() { } if (copySize == 0) { - return; + return 0; } for (size_t i = 0; i < fullInArgs.size(); i++) { @@ -746,6 +769,7 @@ void TrainerThread::copyInArgs() { copySize, FLAGS_parallel_nn ?
false : multiMachine_->useGpu()); } + return copySize; } void TrainerThread::mergeCpuGradients() { diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/gserver/gradientmachines/MultiGradientMachine.h index 9be15ef4bcf34f26b7eceb9047252e537f20a4a8..70203bbb97fe79d72fbc6bd2b5d427cb1de7b61f 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.h +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.h @@ -189,6 +189,8 @@ public: PassType passType, const UpdateCallback& callback); + virtual Argument getLayerOutput(const std::string& layerName); + virtual void onPassEnd(); virtual void finish(); @@ -314,6 +316,8 @@ protected: std::vector<Argument> outArgs_; hl_stream_t outArgStream_; + Argument outLayerArgs_; + /// ParameterType which needs to be merged from each GPU std::vector<ParameterType> mergeTypes_; int numDevices_; /* number of gpu devices */ @@ -383,6 +387,9 @@ public: /// copy the output gradient from the main GradientMachine. void copyOutputGrad(); + /// Whether the thread has input data. + bool hasInputData() { return batchSize_ != 0; } + protected: void mergeCpuGradients(); @@ -403,7 +410,7 @@ protected: void copyGradToBufferThread(); void gradCollectThread(); - void copyInArgs(); + int copyInArgs(); void forward(); void backward(); void backwardCallback(Parameter* para); @@ -463,6 +470,7 @@ protected: /// indicate whether inArgs is copied before forward() bool inArgsCopied_; + int batchSize_; }; } // namespace paddle diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index 273a9111c35a21c01f0cd8d283ecc6eaa4ef0c61..4512aacc81f86bf87fc9ea30adcf081327663f16 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -293,11 +293,10 @@ void NeuralNetwork::backward(const UpdateCallback& callback) { } } -MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) { - auto it = layerMap_.find(layerName); - CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName; - return it->second->getOutputValue(); +Argument NeuralNetwork::getLayerOutput(const std::string& layerName) { + return getLayer(layerName)->getOutput(); } + void NeuralNetwork::onPassEnd() { for (auto& layer : layers_) { layer->onPassEnd(); diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index 25af4abcf81700e200feea806fa3daed19df1275..e7b6c438407e7eab6eab1f6ed496f35caa9f2177 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -87,7 +87,8 @@ public: virtual void backward(const UpdateCallback& callback = nullptr); - MatrixPtr getLayerOutput(const std::string& layerName); + virtual Argument getLayerOutput(const std::string& layerName); + const LayerPtr& getLayer(const std::string& layerName) const { auto it = layerMap_.find(layerName); CHECK(it != layerMap_.end()) << "Unknown layer " << layerName;
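getLayerOutput now returns a full Argument instead of a bare MatrixPtr, and the MultiGradientMachine override above assembles it by concatenating each worker thread's sub-batch output on the CPU stream. A caller-side sketch (the layer name is made up for illustration):

    #include <string>

    // gm may be a NeuralNetwork or a MultiGradientMachine; with the latter,
    // Argument::concat has already merged the per-thread sub-batches, so the
    // returned matrix height equals the full batch size.
    paddle::MatrixPtr fetchLayerValue(paddle::GradientMachine* gm,
                                      const std::string& layerName) {
      paddle::Argument out = gm->getLayerOutput(layerName);
      return out.value;
    }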
diff --git a/paddle/gserver/layers/AgentLayer.cpp b/paddle/gserver/layers/AgentLayer.cpp index 2d300290279d6aafc162f11dbc809537a308ca79..7b1b99b135e35e5fe41dbb3d053a96e3e31e5cf1 100644 --- a/paddle/gserver/layers/AgentLayer.cpp +++ b/paddle/gserver/layers/AgentLayer.cpp @@ -42,7 +42,8 @@ void AgentLayer::forward(PassType passType) { // get Arguments from real layers if (numSamples_ > 0 && numSamples_ < realHeight) { if (realOutput.ids) { - output_.ids->subVecFrom(*realOutput.ids, 0, numSamples_); + output_.ids = + IVector::create(realOutput.ids->getData(), numSamples_, useGpu_); } else { output_.subArgFrom( realOutput, /* offset */ 0, numSamples_, getSize(), useGpu_); diff --git a/paddle/gserver/layers/AverageLayer.cpp b/paddle/gserver/layers/AverageLayer.cpp index b8955ab04f209629c855ed66f8e8e9701b7224a3..96cc4288c6faad4b80c790ed2ce6f5128ea83b6d 100644 --- a/paddle/gserver/layers/AverageLayer.cpp +++ b/paddle/gserver/layers/AverageLayer.cpp @@ -26,8 +26,6 @@ bool AverageLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { SequencePoolLayer::init(layerMap, parameterMap); - dataMtx_ = Matrix::create(nullptr, 1, 1, false, useGpu_); - outMtx_ = Matrix::create(nullptr, 1, getSize(), false, useGpu_); // average strategy if (config_.average_strategy() == "average") { mode_ = kAverage; @@ -60,43 +58,9 @@ void AverageLayer::forward(PassType passType) { void AverageLayer::backward(const UpdateCallback& callback) { SequencePoolLayer::backward(callback); - const int* starts = startPositions_->getData(false); - MatrixPtr grad = getInputGrad(0); - - if (grad) { - size_t dim = getSize(); - real* gradientData = getInputGrad(0)->getData(); - real* gradient = getOutputGrad()->getData(); - size_t numSequences = startPositions_->getSize() - 1; - for (size_t sequenceId = 0; sequenceId < numSequences; ++sequenceId) { - // TODO(Dangqingqing) optimization for GPU - int sequenceLength = starts[sequenceId + 1] - starts[sequenceId]; - if (0 == sequenceLength) { - // empty sequence - continue; - } - dataMtx_->setData( - gradientData + starts[sequenceId] * dim, sequenceLength, dim); - outMtx_->setData(gradient + sequenceId * dim); - switch (mode_) { - case kAverage: { - // plain average - dataMtx_->addBias(*outMtx_, 1.0f / sequenceLength); - break; - } - case kSum: { - // sum instead of average - dataMtx_->addBias(*outMtx_, 1.0f); - break; - } - case kAverageSquareRootN: { - // divide by square root of sequenceLength - dataMtx_->addBias(*outMtx_, 1.0f / sqrt(sequenceLength)); - break; - } - default: { LOG(FATAL) << "should not reach here"; } - } - } + if (getInputGrad(0)) { + getInputGrad(0)->sequenceAvgBackward( + *getOutputGrad(), *startPositions_->getVector(useGpu_), mode_); } } diff --git a/paddle/gserver/layers/AverageLayer.h b/paddle/gserver/layers/AverageLayer.h index 621e1d7bb12ec5b8c7a6173bd601835d9406e814..332552a30479a368c24db10e5ef3a9d59408c8ef 100644 --- a/paddle/gserver/layers/AverageLayer.h +++ b/paddle/gserver/layers/AverageLayer.h @@ -45,8 +45,6 @@ public: void backward(const UpdateCallback& callback = nullptr) override; protected: - MatrixPtr outMtx_; - MatrixPtr dataMtx_; int mode_; }; } // namespace paddle diff --git a/paddle/gserver/layers/CRFDecodingLayer.cpp b/paddle/gserver/layers/CRFDecodingLayer.cpp index fdb46aba68e924480a6595b02c04ff4d1edd914d..191176ce985a8e12e33562f0cab73da6bbe667e6 100644 --- a/paddle/gserver/layers/CRFDecodingLayer.cpp +++ b/paddle/gserver/layers/CRFDecodingLayer.cpp @@ -24,7 +24,7 @@ bool CRFDecodingLayer::init(const LayerMap& layerMap, return false; } crf_.reset(new LinearChainCRF( - numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData(), nullptr)); + numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData())); return true; } diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 02b7aaf17e89d889ca0030f9de2b5d7431a28fd3..0b544420097e9150f8489731b6379dea633e992c 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -42,6 +42,7 @@ bool CRFLayer::init(const LayerMap& layerMap,
CHECK_EQ(parameters_[0]->getSize(), numClasses_ * (numClasses_ + 2)); parameter_ = parameters_[0]; + weight_.reset(new Weight(numClasses_ + 2, numClasses_, parameter_)); // We don't need sequenceStartPositions because each sample of output_ is // for the cost of one sequence. @@ -69,11 +70,7 @@ void CRFLayer::forward(PassType passType) { for (size_t i = 0; i < numSequences; ++i) { if (i >= crfs_.size()) { - crfs_.emplace_back(numClasses_, - parameter_->getBuf(PARAMETER_VALUE)->getData(), - parameter_->getBuf(PARAMETER_GRADIENT) - ? parameter_->getBuf(PARAMETER_GRADIENT)->getData() - : nullptr); + crfs_.emplace_back(numClasses_, weight_->getW()->getData()); } output_.value->getData()[i] = crfs_[i].forward(output.value->getData() + numClasses_ * starts[i], @@ -93,22 +90,25 @@ void CRFLayer::backward(const UpdateCallback& callback) { const int* starts = label.sequenceStartPositions->getData(false); int numSequences = label.sequenceStartPositions->getSize() - 1; + bool needWGrad = weight_->getWGrad() ? true : false; for (int i = 0; i < numSequences; ++i) { crfs_[i].backward(output.value->getData() + numClasses_ * starts[i], - output.grad->getData() + numClasses_ * starts[i], label.ids->getData() + starts[i], - starts[i + 1] - starts[i]); - if (weightLayer_) { - real weight = getInputValue(*weightLayer_)->getElement(i, 0); - MatrixPtr grad = output.grad->subRowMatrix(starts[i], starts[i + 1]); - grad->mulScalar(weight); + starts[i + 1] - starts[i], + needWGrad); + real instanceWeight = weightLayer_ + ? getInputValue(*weightLayer_)->getElement(i, 0) + : real(1.0f); + instanceWeight *= coeff_; + + MatrixPtr grad = output.grad->subRowMatrix(starts[i], starts[i + 1]); + grad->add(*crfs_[i].getXGrad(), real(1.0f), instanceWeight); + if (needWGrad) { + weight_->getWGrad()->add( + *crfs_[i].getWGrad(), real(1.0f), instanceWeight); + } } - if (coeff_ != real(1.0f)) { - output.grad->mulScalar(coeff_); - } - parameter_->incUpdate(callback); } diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/gserver/layers/CRFLayer.h index de36a85083b6b293fd2d8522ec279a38cc4f8be3..00ec13cede97401b4c8a308df6fac27e47692146 100644 --- a/paddle/gserver/layers/CRFLayer.h +++ b/paddle/gserver/layers/CRFLayer.h @@ -38,8 +38,9 @@ protected: size_t numClasses_; ParameterPtr parameter_; std::vector<LinearChainCRF> crfs_; - LayerPtr weightLayer_; // weight for each sequence - real coeff_; // weight for the layer + LayerPtr weightLayer_; // weight for each sequence + std::unique_ptr<Weight> weight_; // parameters + real coeff_; // weight for the layer }; } // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseOperator.cpp b/paddle/gserver/layers/ConvBaseOperator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5c231986292d2cd26ee30ccc122142fccd5b4949 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseOperator.cpp @@ -0,0 +1,150 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#include "ConvBaseOperator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief ConvBaseOperator takes two inputs to perform the convolution. + * The first input is the image, and the second input is the convolution kernel. + * The height of data for the two inputs is the same. Each data of the first input + * is convolved with each data of the second input independently. + * + * The config file api is conv_operator. + */ + +ConvBaseOperator::ConvBaseOperator(const OperatorConfig &config, bool useGpu) + : Operator(config, useGpu) { + CHECK(useGpu); + CHECK_EQ(config_.input_indices_size(), 2L); + + caffeMode_ = true; + getConvParams(); + computeConvSizes(); + + // initialize all to default algorithms + fwdAlgo_ = 0; + bwdFilterAlgo_ = 0; + bwdDataAlgo_ = 0; + fwdLimitBytes_ = 0; + bwdDataLimitBytes_ = 0; + bwdFilterLimitBytes_ = 0; + workSpaceInBytes_ = 0; + workSpace_ = nullptr; + + isSelectAlgo_ = false; +} + +void ConvBaseOperator::allocConvWorkSpace() { + hl_conv_workspace(imageDesc_, + outputDesc_, + filterDesc_, + convDesc_, + &fwdAlgo_, + &fwdLimitBytes_, + &bwdDataAlgo_, + &bwdDataLimitBytes_, + &bwdFilterAlgo_, + &bwdFilterLimitBytes_); + + size_t maxWorkSpace = 0; + maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); + maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_); + + if (maxWorkSpace > workSpaceInBytes_) { + if (workSpaceInBytes_ != 0) { + hl_free_mem_device(workSpace_); + } + // total amount of storage needed + workSpace_ = hl_malloc_device(maxWorkSpace); + workSpaceInBytes_ = maxWorkSpace; + } +} + +void ConvBaseOperator::computeConvSizes() { + hl_create_filter_descriptor( + &filterDesc_, channels_, numFilters_, filterSizeY_, filterSize_); + hl_create_tensor_descriptor(&imageDesc_); + hl_create_tensor_descriptor(&outputDesc_); + hl_create_convolution_descriptor(&convDesc_, + imageDesc_, + filterDesc_, + paddingY_, + padding_, + strideY_, + stride_); +} + +void ConvBaseOperator::reshapeImageDescriptors() { + hl_tensor_reshape(imageDesc_, + 1, + channels_, + imageH_, + imageW_, + channels_ * imageH_ * imageW_, + imageH_ * imageW_, + imageW_, + 1); + hl_tensor_reshape(outputDesc_, + 1, + numFilters_, + outputH_, + outputW_, + numFilters_ * outputH_ * outputW_, + outputH_ * outputW_, + outputW_, + 1); + hl_reset_convolution_descriptor(convDesc_, + imageDesc_, + filterDesc_, + paddingY_, + padding_, + strideY_, + stride_); +} + +void ConvBaseOperator::getConvParams() { + configNumFilters_ = config_.num_filters(); + const ConvConfig &conf = config_.conv_conf(); + padding_ = conf.padding(); + stride_ = conf.stride(); + filterSize_ = conf.filter_size(); + paddingY_ = conf.padding_y(); + strideY_ = conf.stride_y(); + filterSizeY_ = conf.filter_size_y(); + filterPixels_ = filterSize_ * filterSizeY_; + configChannels_ = conf.channels(); + imgSize_ = conf.img_size(); + imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + imgPixels_ = imgSize_ * imgSizeY_; + CHECK_EQ(conf.groups(), 1U); + filterChannels_ = conf.filter_channels(); + outputX_ = conf.output_x(); + outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + outputs_ = outputX_ * outputX_; + + isDeconv_ = (config_.type() == "conv") ?
false : true; + if (isDeconv_) { + channels_ = configNumFilters_; + numFilters_ = configChannels_; + } else { + channels_ = configChannels_; + numFilters_ = configNumFilters_; + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseOperator.h b/paddle/gserver/layers/ConvBaseOperator.h new file mode 100644 index 0000000000000000000000000000000000000000..2d42169cde2a80a26edcf98bc2d728e00b075728 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseOperator.h @@ -0,0 +1,112 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include "Operator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief ConvBaseOperator takes two inputs to perform the convolution. + * The first input is the image, and the second input is the convolution kernel. + * The height of data for the two inputs is the same. Each data of the first input + * is convolved with each data of the second input independently. + * + * The config file api is conv_operator. + */ + +class ConvBaseOperator : public Operator { +public: + ConvBaseOperator(const OperatorConfig &config, bool useGpu); + /** + * Free workspace in device and destroy cudnn tensor descriptor. + */ + virtual ~ConvBaseOperator() { + if (workSpaceInBytes_ != 0) { + hl_free_mem_device(workSpace_); + workSpaceInBytes_ = 0; + } + + hl_destroy_tensor_descriptor(imageDesc_); + hl_destroy_tensor_descriptor(outputDesc_); + hl_destroy_filter_descriptor(filterDesc_); + hl_destroy_convolution_descriptor(convDesc_); + } + +protected: + /** + * Get convolution parameters from layer config and + * initialize member variables. + */ + void getConvParams(); + + /** + * Allocate Gpu Memory for cudnn convolution algorithms. + */ + void allocConvWorkSpace(); + + /** + * Create cudnn tensor descriptor for convolution operation. + */ + void computeConvSizes(); + + /** + * Reshape cudnn tensor descriptor. + */ + void reshapeImageDescriptors(); + + /** + * Reshape cudnn tensor descriptor. + */ + virtual void reshape(int batchSize) = 0; + + /** + * Check filter size is equal to the size calculated by parameters from + * layer config. + */ + void checkFilterSize(const MatrixPtr &filter) { + CHECK_EQ(static_cast<int>(filter->getWidth()), + filterSize_ * filterSizeY_ * channels_ * numFilters_); + } +
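A worked example of the channel/filter swap that ConvBaseOperator::getConvParams() performs above (illustrative values only; the swap exists because the cuDNN descriptors are always created in forward-convolution terms, and deconvolution reuses them with data and output exchanged):

    #include <utility>

    // resolveChannels(false, 3, 64) -> {3, 64} for conv;
    // resolveChannels(true, 3, 64) -> {64, 3} for deconv.
    std::pair<int, int> resolveChannels(bool isDeconv,
                                        int configChannels,
                                        int configNumFilters) {
      return isDeconv ? std::make_pair(configNumFilters, configChannels)
                      : std::make_pair(configChannels, configNumFilters);
    }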
+ /// Most of member variables are same with CudnnConvLayer. + /// There is no explanation here. + bool isDeconv_; + int imageH_, imageW_, outputH_, outputW_; + hl_tensor_descriptor imageDesc_; + hl_tensor_descriptor outputDesc_; + hl_filter_descriptor filterDesc_; + hl_convolution_descriptor convDesc_; + bool caffeMode_; + int inputOffset_, outputOffset_, weightOffset_; + int numFilters_, channels_; + + /// from parsing config + int configNumFilters_, configChannels_; + int padding_, stride_, filterSize_, imgSize_, imgSizeY_; + int paddingY_, strideY_, filterSizeY_; + int imgPixels_, filterPixels_, filterChannels_, outputX_, outputY_, outputs_; + + /// Following member variables are same with CudnnConvLayer. + /// There is no explanation here. + int fwdAlgo_, bwdFilterAlgo_, bwdDataAlgo_; + size_t fwdLimitBytes_, bwdDataLimitBytes_, bwdFilterLimitBytes_; + size_t workSpaceInBytes_; + void *workSpace_; + bool isSelectAlgo_; +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseProjection.cpp b/paddle/gserver/layers/ConvBaseProjection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d1e932ded595c90cbe6040c330c5c8663d81e2b4 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseProjection.cpp @@ -0,0 +1,195 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "ConvBaseProjection.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +ThreadLocalD<std::vector<MemoryHandle*>> ConvBaseProjection::convMem_; + +ConvBaseProjection::ConvBaseProjection(const ProjectionConfig &config, + ParameterPtr parameter, + bool useGpu) + : Projection(config, parameter, useGpu) { + CHECK(useGpu); // only support GPU + getConvParams(); + initCudnn(); + + size_t height = filterH_ * filterW_ * channels_ / groups_; + size_t width = numFilters_; + weight_.reset(new Weight(height, width, parameter)); + weightOffset_ = height * width / groups_; +} + +void ConvBaseProjection::getConvParams() { + const ConvConfig &conf = config_.conv_conf(); + paddingH_ = conf.padding_y(); + paddingW_ = conf.padding(); + + strideH_ = conf.stride_y(); + strideW_ = conf.stride(); + + filterH_ = conf.filter_size_y(); + filterW_ = conf.filter_size(); + + configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + configImgW_ = conf.img_size(); + + configOutH_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + configOutW_ = conf.output_x(); + + configChannels_ = conf.channels(); + configNumFilters_ = config_.num_filters(); + + isDeconv_ = (config_.type() == "conv") ? false : true; + + channels_ = (isDeconv_) ? configNumFilters_ : configChannels_; + numFilters_ = (isDeconv_) ?
configChannels_ : configNumFilters_; + + groups_ = conf.groups(); + CHECK_EQ(channels_ % groups_, 0); + CHECK_EQ(numFilters_ % groups_, 0); +} + +void ConvBaseProjection::initCudnn() { + hl_create_filter_descriptor(&filterDesc_, + channels_ / groups_, + numFilters_ / groups_, + filterH_, + filterW_); + hl_create_tensor_descriptor(&imageDesc_); + hl_create_tensor_descriptor(&outputDesc_); + hl_create_convolution_descriptor(&convDesc_, + imageDesc_, + filterDesc_, + paddingH_, + paddingW_, + strideH_, + strideW_); + + // initialize all to default algorithms + fwdAlgo_ = 0; + bwdFilterAlgo_ = 0; + bwdDataAlgo_ = 0; + fwdLimitBytes_ = 0; + bwdDataLimitBytes_ = 0; + bwdFilterLimitBytes_ = 0; + workSpaceInBytes_ = 0; + + batchNum_ = 0; + isSelectAlgo_ = false; +} + +void ConvBaseProjection::reshapeTensorDesc(int batchSize) { + // The stride between two consecutive samples in the output of ConvProjection + // may not be numFilters_ * outputH_ * outputW_ (conv) or + // channels_ * imageH_ * imageW_ (deconv) + // for example, in the case of layer ConcatenateLayer2 with two + // ConvProjection, the stride is the output_size of layer ConcatenateLayer2. + // So the calculation of nStride is different from CudnnConvLayer. + size_t nStrideImage, nStrideOutput; + if (isDeconv_) { + nStrideImage = out_->value->getStride(); + nStrideOutput = numFilters_ * outputH_ * outputW_; + } else { + nStrideImage = channels_ * imageH_ * imageW_; + nStrideOutput = out_->value->getStride(); + } + + hl_tensor_reshape(imageDesc_, + batchSize, + channels_ / groups_, + imageH_, + imageW_, + nStrideImage, + imageH_ * imageW_, + imageW_, + 1); + + hl_tensor_reshape(outputDesc_, + batchSize, + numFilters_ / groups_, + outputH_, + outputW_, + nStrideOutput, + outputH_ * outputW_, + outputW_, + 1); + + hl_reset_convolution_descriptor(convDesc_, + imageDesc_, + filterDesc_, + paddingH_, + paddingW_, + strideH_, + strideW_); +} + +void ConvBaseProjection::reshape(int batchSize) { + size_t width = calOutputSize(); + CHECK_EQ(width, out_->value->getWidth()); + CHECK_EQ(calInputSize(), in_->value->getWidth()); + + isSelectAlgo_ = (batchSize == batchNum_); + batchNum_ = batchSize; + + if (!isSelectAlgo_) { + reshapeTensorDesc(batchSize); + hl_conv_workspace(imageDesc_, + outputDesc_, + filterDesc_, + convDesc_, + &fwdAlgo_, + &fwdLimitBytes_, + &bwdDataAlgo_, + &bwdDataLimitBytes_, + &bwdFilterAlgo_, + &bwdFilterLimitBytes_); + + size_t maxWorkSpace = 0; + maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); + maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_); + workSpaceInBytes_ = maxWorkSpace; + + VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_ + << " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_; + } + + isSelectAlgo_ = true; +} + +void *ConvBaseProjection::getSpaceBytes(size_t size) { + std::vector<MemoryHandle*>& convMem = *convMem_; + if (convMem.empty()) { + int numDevices = hl_get_device_count(); + convMem.resize(numDevices); + } + + int devId = hl_get_device(); + MemoryHandle **localMem = &(convMem[devId]); + if (NULL == *localMem || size > (*localMem)->getAllocSize()) { + *localMem = new GpuMemoryHandle(size); + } + return (*localMem)->getBuf(); +} + +ConvBaseProjection::~ConvBaseProjection() { + hl_destroy_tensor_descriptor(imageDesc_); + hl_destroy_tensor_descriptor(outputDesc_); + hl_destroy_filter_descriptor(filterDesc_); + hl_destroy_convolution_descriptor(convDesc_); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseProjection.h
b/paddle/gserver/layers/ConvBaseProjection.h new file mode 100644 index 0000000000000000000000000000000000000000..4a33aa1837dfc36dbead60deaccbc6b772fe4754 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseProjection.h @@ -0,0 +1,116 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Projection.h" +#include "paddle/math/MathUtils.h" + +namespace paddle { + +/** + * @brief Base class for ConvProjection and ConvTransProjection. + */ +class ConvBaseProjection : public Projection { +public: + /** + * Constructor. + */ + ConvBaseProjection(const ProjectionConfig& config, + ParameterPtr parameter, + bool useGpu); + + ~ConvBaseProjection(); + +protected: + void getConvParams(); + void initCudnn(); + + void reshapeTensorDesc(int batchSize); + void reshape(int batchSize); + + virtual size_t calOutputSize() = 0; + virtual size_t calInputSize() = 0; + + static void* getSpaceBytes(size_t size); + + /// True if it's deconv projection layer, false if it's ConvProjection layer + bool isDeconv_; + /// imageH_ and imageW_ / outputH_ and outputW_ + /// is calculated from the input layer. + int imageH_, imageW_; + int outputH_, outputW_; + /// configImgH_ and configImgW_ / configOutH_ and configOutW_ + /// is obtained from config. + int configImgH_, configImgW_; + int configOutH_, configOutW_; + /// channels_ and numFilters_ are defined in terms of convolution semantics + int channels_, numFilters_; + /// configChannels_ and configNumFilters_ are obtained from config + /// For Conv they are the same as channels_ and numFilters_ + /// For ConvTrans they are opposite to channels_ and numFilters_ + int configChannels_, configNumFilters_; + int paddingH_, paddingW_; + int strideH_, strideW_; + int filterH_, filterW_; + /// One group offset of input data. + int inputOffset_; + /// One group offset of output data. + int outputOffset_; + /// One group offset of weight. + int weightOffset_; + int groups_; + + /// Cudnn tensor descriptor for input. + hl_tensor_descriptor imageDesc_; + /// Cudnn tensor descriptor for output. + hl_tensor_descriptor outputDesc_; + /// Cudnn tensor descriptor for filter. + hl_filter_descriptor filterDesc_; + /// Cudnn tensor descriptor for a convolution operation. + hl_convolution_descriptor convDesc_; + + /// Record the algorithm for forward convolution, which is obtained by cudnn + /// api to search the best suited algorithm. + int fwdAlgo_; + /// Record the algorithm for computing convolution gradient with respect to + /// filter coefficients. + int bwdFilterAlgo_; + /// Record the algorithm for computing convolution gradient with respect to + /// the output. + int bwdDataAlgo_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// forward convolution with the specified algo. + size_t fwdLimitBytes_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// backwardData with the specified algo.
+ size_t bwdDataLimitBytes_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// backwardFilter with the specified algo. + size_t bwdFilterLimitBytes_; + /// Size of total work space. + size_t workSpaceInBytes_; + + /// Whether to call cuDNN api to choose conv algorithm. + bool isSelectAlgo_; + /// batchNum is used to record batch size. If the batch size is changed, + /// the selection algorithm will be called. + int batchNum_; + bool bias_; + + std::unique_ptr<Weight> weight_; + static ThreadLocalD<std::vector<MemoryHandle*>> convMem_; +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvOperator.cpp b/paddle/gserver/layers/ConvOperator.cpp index f943410dee0dc2f3d356c9d7d8f61398fe2871c8..80932c8c509e3cb013c7e0051cbf4d8ccced0228 100644 --- a/paddle/gserver/layers/ConvOperator.cpp +++ b/paddle/gserver/layers/ConvOperator.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "Operator.h" +#include "ConvOperator.h" #include "paddle/math/MathUtils.h" #include "paddle/math/Matrix.h" @@ -27,120 +27,8 @@ namespace paddle { * The config file api is conv_operator. */ -class ConvOperator : public Operator { -public: - ConvOperator(const OperatorConfig &config, bool useGpu); - /** - * Free workspace in device and destroy cudnn tensor descriptor. - */ - virtual ~ConvOperator() { - if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpace_); - workSpaceInBytes_ = 0; - } - - hl_destroy_tensor_descriptor(inputDesc_); - hl_destroy_tensor_descriptor(outputDesc_); - hl_destroy_filter_descriptor(filterDesc_); - hl_destroy_convolution_descriptor(convDesc_); - } - virtual void forward(); - virtual void backward(); - -private: - /** - * Get convolution parameters from layer config and - * initialize member variables. - */ - void getConvParams(); - - /** - * Allocate Gpu Memory for cudnn convolution algorithms. - */ - void allocConvWorkSpace(size_t maxWorkSpace); - - /** - * Create cudnn tensor descriptor for convolution operation. - */ - void computeConvSizes(); - - /** - * Reshape cudnn tensor descriptor. - */ - void reshapeImageDescriptors(); - - /** - * Reshape cudnn tensor descriptor. - */ - void reshape(int batchSize); - - /** - * Check filter size is equal to the size calculated by parameters from - * layer config. - */ - void checkFilterSize(const MatrixPtr &filter) { - CHECK_EQ(static_cast<int>(filter->getWidth()), - filterSize_ * filterSizeY_ * channels_ * numFilters_); - } - - /// Most of member variables are same with CudnnConvLayer. - /// There is no explanation here. - int imageH_, imageW_, outputH_, outputW_; - hl_tensor_descriptor inputDesc_; - hl_tensor_descriptor outputDesc_; - hl_filter_descriptor filterDesc_; - hl_convolution_descriptor convDesc_; - bool caffeMode_; - int inputOffset_, outputOffset_, weightOffset_; - int numFilters_; - int padding_, stride_, filterSize_, channels_, imgSize_, imgSizeY_; - int paddingY_, strideY_, filterSizeY_; - int imgPixels_, filterPixels_, filterChannels_, outputX_, outputY_, outputs_; - - /// Following member variables are same with CudnnConvLayer. - /// There is no explanation here.
- int fwdAlgo_, bwdFilterAlgo_, bwdDataAlgo_; - size_t fwdLimitBytes_, bwdDataLimitBytes_, bwdFilterLimitBytes_; - size_t workSpaceInBytes_; - void *workSpace_; - bool isSelectAlgo_; -}; - REGISTER_OPERATOR(conv, ConvOperator); -ConvOperator::ConvOperator(const OperatorConfig &config, bool useGpu) - : Operator(config, useGpu) { - CHECK(useGpu); - CHECK_EQ(config_.input_indices_size(), 2L); - - caffeMode_ = true; - getConvParams(); - computeConvSizes(); - - // initialize all to default algorithms - fwdAlgo_ = 0; - bwdFilterAlgo_ = 0; - bwdDataAlgo_ = 0; - fwdLimitBytes_ = 0; - bwdDataLimitBytes_ = 0; - bwdFilterLimitBytes_ = 0; - workSpaceInBytes_ = 0; - workSpace_ = nullptr; - - isSelectAlgo_ = false; -} - -void ConvOperator::allocConvWorkSpace(size_t maxWorkSpace) { - if (maxWorkSpace > workSpaceInBytes_) { - if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpace_); - } - // total amount of storage needed - workSpace_ = hl_malloc_device(maxWorkSpace); - workSpaceInBytes_ = maxWorkSpace; - } -} - void ConvOperator::reshape(int batchSize) { imageH_ = ins_[0]->getFrameHeight(); imageW_ = ins_[0]->getFrameWidth(); @@ -148,106 +36,25 @@ void ConvOperator::reshape(int batchSize) { if (imageW_ == 0) imageW_ = imgSize_; outputH_ = outputSize(imageH_, filterSizeY_, paddingY_, strideY_, caffeMode_); outputW_ = outputSize(imageW_, filterSize_, padding_, stride_, caffeMode_); - + /// Check that the outputSizes are consistent with config + CHECK_EQ(outputH_, outputY_); + CHECK_EQ(outputW_, outputX_); out_->setFrameHeight(outputH_); out_->setFrameWidth(outputW_); reshapeImageDescriptors(); - if (!isSelectAlgo_) { - hl_conv_workspace(inputDesc_, - outputDesc_, - filterDesc_, - convDesc_, - &fwdAlgo_, - &fwdLimitBytes_, - &bwdDataAlgo_, - &bwdDataLimitBytes_, - &bwdFilterAlgo_, - &bwdFilterLimitBytes_); - - size_t maxWorkSpace = 0; - maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); - maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_); + inputOffset_ = channels_ * imageH_ * imageW_; + outputOffset_ = numFilters_ * outputH_ * outputW_; + weightOffset_ = numFilters_ * channels_ * filterSize_ * filterSizeY_; - allocConvWorkSpace(maxWorkSpace); + if (!isSelectAlgo_) { + allocConvWorkSpace(); } isSelectAlgo_ = true; } -void ConvOperator::computeConvSizes() { - hl_create_filter_descriptor( - &filterDesc_, channels_, numFilters_, filterSizeY_, filterSize_); - hl_create_tensor_descriptor(&inputDesc_); - int outputX = - outputSize(imgSize_, filterSize_, padding_, stride_, caffeMode_); - int outputY = - outputSize(imgSizeY_, filterSizeY_, paddingY_, strideY_, caffeMode_); - CHECK_EQ(outputX, outputX_); - CHECK_EQ(outputY, outputY_); - hl_create_tensor_descriptor(&outputDesc_); - hl_create_convolution_descriptor(&convDesc_, - inputDesc_, - filterDesc_, - paddingY_, - padding_, - strideY_, - stride_); -} - -void ConvOperator::reshapeImageDescriptors() { - hl_tensor_reshape(inputDesc_, - 1, - channels_, - imageH_, - imageW_, - channels_ * imageH_ * imageW_, - imageH_ * imageW_, - imageW_, - 1); - hl_tensor_reshape(outputDesc_, - 1, - numFilters_, - outputH_, - outputW_, - numFilters_ * outputH_ * outputW_, - outputH_ * outputW_, - outputW_, - 1); - hl_reset_convolution_descriptor(convDesc_, - inputDesc_, - filterDesc_, - paddingY_, - padding_, - strideY_, - stride_); - inputOffset_ = channels_ * imageH_ * imageW_; - outputOffset_ = numFilters_ * outputH_ * outputW_; - weightOffset_ = numFilters_ * channels_ * filterSize_ * filterSize_; -} - -void ConvOperator::getConvParams() { - numFilters_ 
= config_.num_filters(); - const ConvConfig &conf = config_.conv_conf(); - padding_ = conf.padding(); - stride_ = conf.stride(); - filterSize_ = conf.filter_size(); - paddingY_ = conf.padding_y(); - strideY_ = conf.stride_y(); - filterSizeY_ = conf.filter_size_y(); - filterPixels_ = filterSize_ * filterSizeY_; - channels_ = conf.channels(); - imgSize_ = conf.img_size(); - imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); - imgPixels_ = imgSize_ * imgSizeY_; - CHECK_EQ(conf.groups(), 1U); - filterChannels_ = conf.filter_channels(); - outputX_ = conf.output_x(); - outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); - outputs_ = outputX_ * outputX_; -} - void ConvOperator::forward() { size_t batchSize = ins_[0]->value->getHeight(); reshape(batchSize); @@ -264,7 +71,7 @@ void ConvOperator::forward() { real *inputData = ins_[0]->value->getData() + inputOffset_ * batchId; real *wgtData = ins_[1]->value->getData() + weightOffset_ * batchId; real *outData = out_->value->getData() + outputOffset_ * batchId; - hl_convolution_forward(inputDesc_, + hl_convolution_forward(imageDesc_, inputData, outputDesc_, outData, @@ -287,7 +94,7 @@ void ConvOperator::backward() { if (ins_[1]->grad) { real *inputData = ins_[0]->value->getData() + inputOffset_ * batchId; real *weightGrad = ins_[1]->grad->getData() + weightOffset_ * batchId; - hl_convolution_backward_filter(inputDesc_, + hl_convolution_backward_filter(imageDesc_, inputData, outputDesc_, outGrad, @@ -303,7 +110,7 @@ void ConvOperator::backward() { if (NULL != preGrad) { real *inputGrad = preGrad->getData() + inputOffset_ * batchId; real *wgtData = ins_[1]->value->getData() + weightOffset_ * batchId; - hl_convolution_backward_data(inputDesc_, + hl_convolution_backward_data(imageDesc_, inputGrad, outputDesc_, outGrad, diff --git a/paddle/gserver/layers/ConvOperator.h b/paddle/gserver/layers/ConvOperator.h new file mode 100644 index 0000000000000000000000000000000000000000..0f3546c67ac174628044d5fb6e5c7bce06f37995 --- /dev/null +++ b/paddle/gserver/layers/ConvOperator.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include "ConvBaseOperator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief ConvOperator takes two inputs to perform the convolution. + * The first input is the image, and the second input is the convolution kernel. + * The height of data for the two inputs is the same. Each data of the first input + * is convolved with each data of the second input independently. + * + * The config file api is conv_operator. + */ + +class ConvOperator : public ConvBaseOperator { +public: + ConvOperator(const OperatorConfig &config, bool useGpu) + : ConvBaseOperator(config, useGpu) {} + /** + * Free workspace in device and destroy cudnn tensor descriptor.
*/ + virtual ~ConvOperator() {} + void forward() override; + void backward() override; + void reshape(int batchSize) override; +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp index 0281170bc59855f6f4d2f4212523275a92d202d5..5b7ecc5560c1e7431305b34a331fe1fbc96c6b06 100644 --- a/paddle/gserver/layers/ConvProjection.cpp +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -19,149 +19,32 @@ namespace paddle { REGISTER_PROJECTION(conv, ConvProjection); -ThreadLocalD<std::vector<MemoryHandle*>> ConvProjection::convMem_; - -ConvProjection::ConvProjection(const ProjectionConfig &config, - ParameterPtr parameter, - bool useGpu) - : Projection(config, parameter, useGpu) { - CHECK(useGpu); // only support GPU - getConvParams(); - initCudnn(); - - size_t height = filterH_ * filterW_ * channels_ / groups_; - size_t width = numFilters_; - weight_.reset(new Weight(height, width, parameter)); - weightOffset_ = height * width / groups_; -} - -void ConvProjection::getConvParams() { - const ConvConfig &conf = config_.conv_conf(); - paddingH_ = conf.padding_y(); - paddingW_ = conf.padding(); - - strideH_ = conf.stride_y(); - strideW_ = conf.stride(); - - filterH_ = conf.filter_size_y(); - filterW_ = conf.filter_size(); - - configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); - configImgW_ = conf.img_size(); - - channels_ = conf.channels(); - numFilters_ = config_.num_filters(); - - groups_ = conf.groups(); - CHECK_EQ(channels_ % groups_, 0); - CHECK_EQ(numFilters_ % groups_, 0); -} - -void ConvProjection::initCudnn() { - hl_create_filter_descriptor(&filterDesc_, - channels_ / groups_, - numFilters_ / groups_, - filterH_, - filterW_); - hl_create_tensor_descriptor(&inputDesc_); - hl_create_tensor_descriptor(&outputDesc_); - hl_create_convolution_descriptor(&convDesc_, - inputDesc_, - filterDesc_, - paddingH_, - paddingW_, - strideH_, - strideW_); - - // initialize all to default algorithms - fwdAlgo_ = 0; - bwdFilterAlgo_ = 0; - bwdDataAlgo_ = 0; - fwdLimitBytes_ = 0; - bwdDataLimitBytes_ = 0; - bwdFilterLimitBytes_ = 0; - workSpaceInBytes_ = 0; - - batchNum_ = 0; - isSelectAlgo_ = false; -} - -void ConvProjection::reshapeTensorDesc(int batchSize) { - hl_tensor_reshape(inputDesc_, - batchSize, - channels_ / groups_, - imageH_, - imageW_, - channels_ * imageH_ * imageW_, - imageH_ * imageW_, - imageW_, - 1); - hl_reset_convolution_descriptor(convDesc_, - inputDesc_, - filterDesc_, - paddingH_, - paddingW_, - strideH_, - strideW_); - - // The stride between two consecutive images in ConvProjection may not be 1, - // for example, in the case of layer ConcatenateLayer2 with two - // ConvProjection, the stride is the output_size of layer ConcatenateLayer2. - // So the calculation of nStride is different from CudnnConvLayer. - // In fact, only "nStride = out_->value->getStride()" is ok.
- size_t nStride = numFilters_ * outputH_ * outputW_; - if (out_->value->isContiguous()) { - CHECK_EQ(nStride, out_->value->getWidth()); - } else { - nStride = out_->value->getStride(); - } - - hl_tensor_reshape(outputDesc_, - batchSize, - numFilters_ / groups_, - outputH_, - outputW_, - nStride, - outputH_ * outputW_, - outputW_, - 1); +size_t ConvProjection::calOutputSize() { + imageH_ = in_->getFrameHeight(); + imageW_ = in_->getFrameWidth(); + if (imageH_ == 0) imageH_ = configImgH_; + if (imageW_ == 0) imageW_ = configImgW_; + outputH_ = outputSize(imageH_, + filterH_, + paddingH_, + strideH_, + /* caffeMode */ true); + outputW_ = outputSize(imageW_, + filterW_, + paddingW_, + strideW_, + /* caffeMode */ true); + + const_cast<Argument*>(out_)->setFrameHeight(outputH_); + const_cast<Argument*>(out_)->setFrameWidth(outputW_); + + inputOffset_ = (configChannels_ / groups_) * imageH_ * imageW_; + outputOffset_ = (configNumFilters_ / groups_) * outputH_ * outputW_; + return outputH_ * outputW_ * configNumFilters_; } -void ConvProjection::reshape(int batchSize) { - size_t width = calOutputSize(); - CHECK_EQ(width, out_->value->getWidth()); - CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_), - in_->value->getWidth()) - << "Wrong input size for convolution" - << " channels=" << channels_ << " imageH=" << imageH_ - << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth(); - - isSelectAlgo_ = (batchSize == batchNum_); - batchNum_ = batchSize; - - if (!isSelectAlgo_) { - reshapeTensorDesc(batchSize); - hl_conv_workspace(inputDesc_, - outputDesc_, - filterDesc_, - convDesc_, - &fwdAlgo_, - &fwdLimitBytes_, - &bwdDataAlgo_, - &bwdDataLimitBytes_, - &bwdFilterAlgo_, - &bwdFilterLimitBytes_); - - size_t maxWorkSpace = 0; - maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); - maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_); - workSpaceInBytes_ = maxWorkSpace; - - VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_ - << " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_; - } - - isSelectAlgo_ = true; +size_t ConvProjection::calInputSize() { + return static_cast<size_t>(configChannels_ * imageH_ * imageW_); } void ConvProjection::forward() { @@ -179,7 +62,7 @@ void ConvProjection::forward() { real *inputData = in_->value->getData() + g * inputOffset_; real *wgtData = weight_->getW()->getData() + g * weightOffset_; real *outData = out_->value->getData() + g * outputOffset_; - hl_convolution_forward(inputDesc_, + hl_convolution_forward(imageDesc_, inputData, outputDesc_, outData, @@ -205,7 +88,7 @@ void ConvProjection::backward(const UpdateCallback &callback) { if (weight_->getWGrad()) { real *inputData = in_->value->getData() + g * inputOffset_; real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_; - hl_convolution_backward_filter(inputDesc_, + hl_convolution_backward_filter(imageDesc_, inputData, outputDesc_, outGrad, @@ -221,7 +104,7 @@ void ConvProjection::backward(const UpdateCallback &callback) { if (NULL != preGrad) { real *inputGrad = preGrad->getData() + g * inputOffset_; real *wgtData = weight_->getW()->getData() + g * weightOffset_; - hl_convolution_backward_data(inputDesc_, + hl_convolution_backward_data(imageDesc_, inputGrad, outputDesc_, outGrad, @@ -237,26 +120,4 @@ void ConvProjection::backward(const UpdateCallback &callback) { weight_->getParameterPtr()->incUpdate(callback); } -void *ConvProjection::getSpaceBytes(size_t size) { - std::vector<MemoryHandle*>& convMem = *convMem_; - if (convMem.empty()) { - int numDevices = hl_get_device_count(); -
void ConvProjection::forward() {
@@ -179,7 +62,7 @@ void ConvProjection::forward() { real *inputData = in_->value->getData() + g * inputOffset_; real *wgtData = weight_->getW()->getData() + g * weightOffset_; real *outData = out_->value->getData() + g * outputOffset_; - hl_convolution_forward(inputDesc_, + hl_convolution_forward(imageDesc_, inputData, outputDesc_, outData,
@@ -205,7 +88,7 @@ void ConvProjection::backward(const UpdateCallback &callback) { if (weight_->getWGrad()) { real *inputData = in_->value->getData() + g * inputOffset_; real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_; - hl_convolution_backward_filter(inputDesc_, + hl_convolution_backward_filter(imageDesc_, inputData, outputDesc_, outGrad,
@@ -221,7 +104,7 @@ void ConvProjection::backward(const UpdateCallback &callback) { if (NULL != preGrad) { real *inputGrad = preGrad->getData() + g * inputOffset_; real *wgtData = weight_->getW()->getData() + g * weightOffset_; - hl_convolution_backward_data(inputDesc_, + hl_convolution_backward_data(imageDesc_, inputGrad, outputDesc_, outGrad,
@@ -237,26 +120,4 @@ void ConvProjection::backward(const UpdateCallback &callback) { weight_->getParameterPtr()->incUpdate(callback); } -void *ConvProjection::getSpaceBytes(size_t size) { - std::vector<MemoryHandle *> &convMem = *convMem_; - if (convMem.empty()) { - int numDevices = hl_get_device_count(); - convMem.resize(numDevices); - } - - int devId = hl_get_device(); - MemoryHandle **localMem = &(convMem[devId]); - if (NULL == *localMem || size > (*localMem)->getAllocSize()) { - *localMem = new GpuMemoryHandle(size); - } - return (*localMem)->getBuf(); -} - -ConvProjection::~ConvProjection() { - hl_destroy_tensor_descriptor(inputDesc_); - hl_destroy_tensor_descriptor(outputDesc_); - hl_destroy_filter_descriptor(filterDesc_); - hl_destroy_convolution_descriptor(convDesc_); -} - } // namespace paddle
diff --git a/paddle/gserver/layers/ConvProjection.h b/paddle/gserver/layers/ConvProjection.h index c32e5e1d3ab2f85feb6dd2fb5fbddd7482598e58..b7d7cc9a275529a02a5d8e82d28ed79cb7ce0b43 100644 --- a/paddle/gserver/layers/ConvProjection.h +++ b/paddle/gserver/layers/ConvProjection.h
@@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "Projection.h" +#include "ConvBaseProjection.h" #include "paddle/math/MathUtils.h" namespace paddle {
@@ -22,109 +22,22 @@ namespace paddle { /** * @brief Convolution projection that does the same calculation as CudnnConvLayer. */ -class ConvProjection : public Projection { +class ConvProjection : public ConvBaseProjection { public: /** * Constructor. */ ConvProjection(const ProjectionConfig& config, ParameterPtr parameter, - bool useGpu); + bool useGpu) + : ConvBaseProjection(config, parameter, useGpu) {} - ~ConvProjection(); + ~ConvProjection() {} virtual void forward(); virtual void backward(const UpdateCallback& callback); - -protected: - void getConvParams(); - void initCudnn(); - - void reshapeTensorDesc(int batchSize); - void reshape(int batchSize); - - size_t calOutputSize() { - imageH_ = in_->getFrameHeight(); - imageW_ = in_->getFrameWidth(); - if (imageH_ == 0) imageH_ = configImgH_; - if (imageW_ == 0) imageW_ = configImgW_; - outputH_ = outputSize(imageH_, - filterH_, - paddingH_, - strideH_, - /* caffeMode */ true); - outputW_ = outputSize(imageW_, - filterW_, - paddingW_, - strideW_, - /* caffeMode */ true); - - const_cast<Argument *>(out_)->setFrameHeight(outputH_); - const_cast<Argument *>(out_)->setFrameWidth(outputW_); - - inputOffset_ = (channels_ / groups_) * imageH_ * imageW_; - outputOffset_ = (numFilters_ / groups_) * outputH_ * outputW_; - return outputH_ * outputW_ * numFilters_; - } - - static void* getSpaceBytes(size_t size); - - /// imageH_ and imageW_ are calculated from the input layer. - int imageH_, imageW_; - /// configImgH_ and configImgW_ are obtained from config. - int configImgH_, configImgW_; - int outputH_, outputW_; - int channels_, numFilters_; - int paddingH_, paddingW_; - int strideH_, strideW_; - int filterH_, filterW_; - /// One group offset of input data. - int inputOffset_; - /// One group offset of output data. - int outputOffset_; - /// One group offset of weight. - int weightOffset_; - int groups_; - - /// Cudnn tensor descriptor for input. - hl_tensor_descriptor inputDesc_; - /// Cudnn tensor descriptor for output. - hl_tensor_descriptor outputDesc_; - /// Cudnn filter descriptor. - hl_filter_descriptor filterDesc_; - /// Cudnn descriptor for a convolution operation. - hl_convolution_descriptor convDesc_; - - /// Record the algorithm for forward convolution, which is obtained by cudnn - /// api to search the best suited algorithm. - int fwdAlgo_; - /// Record the algorithm for computing convolution gradient with respect to - /// filter coefficients. - int bwdFilterAlgo_; - /// Record the algorithm for computing convolution gradient with respect to - /// the output.
- int bwdDataAlgo_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// forward convolution with the specified algo. - size_t fwdLimitBytes_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// backwardData with the specified algo. - size_t bwdDataLimitBytes_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// backwardFilter with the specified algo. - size_t bwdFilterLimitBytes_; - /// Size of total work space. - size_t workSpaceInBytes_; - - /// Whether to call cuDNN api to choose conv algorithm. - bool isSelectAlgo_; - /// batchNum is used to record batch size. If the batch size is changed, - /// the selection algorithm will be called. - int batchNum_; - bool bias_; - - std::unique_ptr<Weight> weight_; - static ThreadLocalD<std::vector<MemoryHandle *>> convMem_; + virtual size_t calOutputSize(); + virtual size_t calInputSize(); }; } // namespace paddle
diff --git a/paddle/gserver/layers/ConvTransOperator.cpp b/paddle/gserver/layers/ConvTransOperator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..db026337a473f7edf1a7c0db320f60ff3048eb9c --- /dev/null +++ b/paddle/gserver/layers/ConvTransOperator.cpp
@@ -0,0 +1,125 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "ConvTransOperator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief ConvTransOperator takes two inputs to perform the transposed + * convolution. The first input is the image, and the second input is the + * convolution kernel. The height of the data is the same for both inputs. + * Each sample of the first input is convolved with the corresponding sample + * of the second input independently. + * + * The config file api is conv_operator.
+ */ + +REGISTER_OPERATOR(convt, ConvTransOperator); + +void ConvTransOperator::reshape(int batchSize) { + outputH_ = ins_[0]->getFrameHeight(); + outputW_ = ins_[0]->getFrameWidth(); + if (outputH_ == 0) outputH_ = outputY_; + if (outputW_ == 0) outputW_ = outputX_; + imageH_ = imageSize(outputH_, filterSizeY_, paddingY_, strideY_, caffeMode_); + imageW_ = imageSize(outputW_, filterSize_, padding_, stride_, caffeMode_); + /// Check that the imageSizes are consistent with config + CHECK_EQ(imageH_, imgSizeY_); + CHECK_EQ(imageW_, imgSize_); + out_->setFrameHeight(imageH_); + out_->setFrameWidth(imageW_); + + reshapeImageDescriptors(); + + inputOffset_ = numFilters_ * outputH_ * outputW_; + outputOffset_ = channels_ * imageH_ * imageW_; + weightOffset_ = numFilters_ * channels_ * filterSize_ * filterSizeY_; + + if (!isSelectAlgo_) { + allocConvWorkSpace(); + } + + isSelectAlgo_ = true; +} + +void ConvTransOperator::forward() { + size_t batchSize = ins_[0]->value->getHeight(); + reshape(batchSize); + CHECK_EQ(ins_[1]->value->getHeight(), batchSize); + checkFilterSize(ins_[1]->value); + Matrix::resizeOrCreate( + out_->value, batchSize, imageH_ * imageW_ * channels_, false, useGpu_); + { + AsyncGpuBlock block; + for (size_t batchId = 0; batchId < batchSize; ++batchId) { + real *inputData = ins_[0]->value->getData() + inputOffset_ * batchId; + real *wgtData = ins_[1]->value->getData() + weightOffset_ * batchId; + real *outData = out_->value->getData() + outputOffset_ * batchId; + hl_convolution_backward_data(imageDesc_, + outData, + outputDesc_, + inputData, + filterDesc_, + wgtData, + convDesc_, + workSpace_, + workSpaceInBytes_, + bwdDataAlgo_); + } + } +} + +void ConvTransOperator::backward() { + size_t batchSize = ins_[0]->value->getHeight(); + { + AsyncGpuBlock block; + for (size_t batchId = 0; batchId < batchSize; ++batchId) { + real *outGrad = out_->grad->getData() + outputOffset_ * batchId; + if (ins_[1]->grad) { + real *inputData = ins_[0]->value->getData() + inputOffset_ * batchId; + real *weightGrad = ins_[1]->grad->getData() + weightOffset_ * batchId; + hl_convolution_backward_filter(imageDesc_, + outGrad, + outputDesc_, + inputData, + filterDesc_, + weightGrad, + convDesc_, + workSpace_, + workSpaceInBytes_, + bwdFilterAlgo_); + } + + MatrixPtr preGrad = ins_[0]->grad; + if (NULL != preGrad) { + real *inputGrad = preGrad->getData() + inputOffset_ * batchId; + real *wgtData = ins_[1]->value->getData() + weightOffset_ * batchId; + hl_convolution_forward(imageDesc_, + outGrad, + outputDesc_, + inputGrad, + filterDesc_, + wgtData, + convDesc_, + workSpace_, + workSpaceInBytes_, + fwdAlgo_); + } + } + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvTransOperator.h b/paddle/gserver/layers/ConvTransOperator.h new file mode 100644 index 0000000000000000000000000000000000000000..ca08dc9aa77d59b45635c16cdd5064c5c3b5f96d --- /dev/null +++ b/paddle/gserver/layers/ConvTransOperator.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include "ConvBaseOperator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief ConvTransOperator takes two inputs to perform the transposed + * convolution. The first input is the image, and the second input is the + * convolution kernel. The height of the data is the same for both inputs. + * Each sample of the first input is convolved with the corresponding sample + * of the second input independently. + * + * The config file api is conv_operator. + */ + +class ConvTransOperator : public ConvBaseOperator { +public: + ConvTransOperator(const OperatorConfig &config, bool useGpu) + : ConvBaseOperator(config, useGpu) {} + /** + * Free workspace in device and destroy cudnn tensor descriptor. + */ + virtual ~ConvTransOperator() {} + void forward() override; + void backward() override; + void reshape(int batchSize) override; +}; + +} // namespace paddle
diff --git a/paddle/gserver/layers/ConvTransProjection.cpp b/paddle/gserver/layers/ConvTransProjection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..48132a3ce4cc4b50fea6d755d84d7254d2055bec --- /dev/null +++ b/paddle/gserver/layers/ConvTransProjection.cpp
@@ -0,0 +1,123 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
 */ + +#include "ConvTransProjection.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +REGISTER_PROJECTION(convt, ConvTransProjection); +size_t ConvTransProjection::calOutputSize() { + outputH_ = in_->getFrameHeight(); + outputW_ = in_->getFrameWidth(); + if (outputH_ == 0) outputH_ = configOutH_; + if (outputW_ == 0) outputW_ = configOutW_; + imageH_ = imageSize(outputH_, + filterH_, + paddingH_, + strideH_, + /* caffeMode */ true); + + imageW_ = imageSize(outputW_, + filterW_, + paddingW_, + strideW_, + /* caffeMode */ true); + + const_cast<Argument *>(out_)->setFrameHeight(imageH_); + const_cast<Argument *>(out_)->setFrameWidth(imageW_); + + inputOffset_ = (configChannels_ / groups_) * outputH_ * outputW_; + outputOffset_ = (configNumFilters_ / groups_) * imageH_ * imageW_; + return imageH_ * imageW_ * configNumFilters_; +} + +size_t ConvTransProjection::calInputSize() { + return static_cast<size_t>(configChannels_ * outputH_ * outputW_); +} + +void ConvTransProjection::forward() { + int batchSize = in_->value->getHeight(); + reshape(batchSize); + + void *workSpace = NULL; + if (workSpaceInBytes_ > 0) { + workSpace = getSpaceBytes(workSpaceInBytes_); + } + + for (int g = 0; g < groups_; ++g) { + REGISTER_TIMER_INFO("CudnnConvTransFwTimer", getName().c_str()); + + real *inData = in_->value->getData() + g * inputOffset_; + real *wgtData = weight_->getW()->getData() + g * weightOffset_; + real *outData = out_->value->getData() + g * outputOffset_; + hl_convolution_backward_data(imageDesc_, + outData, + outputDesc_, + inData, + filterDesc_, + wgtData, + convDesc_, + workSpace, + bwdDataLimitBytes_, + bwdDataAlgo_); + } +} + +void ConvTransProjection::backward(const UpdateCallback &callback) { + REGISTER_TIMER_INFO("CudnnConvTransBpTimer", getName().c_str()); + + void *workSpace = NULL; + if (workSpaceInBytes_ > 0) { + workSpace = getSpaceBytes(workSpaceInBytes_); + } + + for (int g = 0; g < groups_; ++g) { + real *outGrad = out_->grad->getData() + g * outputOffset_; + if (weight_->getWGrad()) { + real *inData = in_->value->getData() + g * inputOffset_; + real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_; + hl_convolution_backward_filter(imageDesc_, + outGrad, + outputDesc_, + inData, + filterDesc_, + weightGrad, + convDesc_, + workSpace, + bwdFilterLimitBytes_, + bwdFilterAlgo_); + } + + MatrixPtr preGrad = in_->grad; + if (NULL != preGrad) { + real *inGrad = preGrad->getData() + g * inputOffset_; + real *wgtData = weight_->getW()->getData() + g * weightOffset_; + hl_convolution_forward(imageDesc_, + outGrad, + outputDesc_, + inGrad, + filterDesc_, + wgtData, + convDesc_, + workSpace, + fwdLimitBytes_, + fwdAlgo_); + } + } + + weight_->getParameterPtr()->incUpdate(callback); +} + +} // namespace paddle
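The caffeMode imageSize() used by calOutputSize() above is the inverse of the convolution output-size formula: image = (output - 1) * stride + filter - 2 * padding. A minimal sketch under that assumption (deconvImageSize is an illustrative helper, not part of this patch):

#include <cassert>

// Illustrative caffeMode transposed-convolution image size (assumption).
int deconvImageSize(int output, int filter, int padding, int stride) {
  assert(stride > 0);
  return (output - 1) * stride + filter - 2 * padding;
}
// e.g. output 2, filter 3, padding 0, stride 1 -> 4, matching the 2x2 -> 4x4
// deconvolution case exercised by test_ConvUnify further below.
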
diff --git a/paddle/gserver/layers/ConvTransProjection.h b/paddle/gserver/layers/ConvTransProjection.h new file mode 100644 index 0000000000000000000000000000000000000000..6508d17b2409aa0cc11cdafb306604816f010718 --- /dev/null +++ b/paddle/gserver/layers/ConvTransProjection.h
@@ -0,0 +1,43 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "ConvBaseProjection.h" +#include "paddle/math/MathUtils.h" + +namespace paddle { + +/** + * @brief Convolution transpose projection. It does the reverse calculation of + * ConvProjection; its forward pass runs the convolution backward-data kernel. + */ +class ConvTransProjection : public ConvBaseProjection { +public: + /** + * Constructor. + */ + ConvTransProjection(const ProjectionConfig& config, + ParameterPtr parameter, + bool useGpu) + : ConvBaseProjection(config, parameter, useGpu) {} + + ~ConvTransProjection() {} + + virtual void forward(); + virtual void backward(const UpdateCallback& callback); + virtual size_t calOutputSize(); + virtual size_t calInputSize(); +}; + +} // namespace paddle
diff --git a/paddle/gserver/layers/CosSimLayer.cpp b/paddle/gserver/layers/CosSimLayer.cpp index a6c0300acf6752a3536e7939577b561fd97d1eb8..57ba124e40cbd098fa8b0012ff31d6935b16862a 100644 --- a/paddle/gserver/layers/CosSimLayer.cpp +++ b/paddle/gserver/layers/CosSimLayer.cpp
@@ -42,7 +42,7 @@ void CosSimLayer::forward(PassType passType) { /* malloc memory for the output_ if necessary */ int batchSize = getInputValue(0)->getHeight(); int size = getSize(); - CHECK_EQ(forward_.size(), 1) << "Only one forward function needed"; + CHECK_EQ(forward_.size(), 1UL) << "Only one forward function needed"; { REGISTER_TIMER_INFO("CosFwResetTimer", getName().c_str());
@@ -68,7 +68,7 @@ void CosSimLayer::forward(PassType passType) { void CosSimLayer::backward(const UpdateCallback& callback) { /* activation */ { REGISTER_TIMER_INFO("CosBpAtvTimer", getName().c_str()); - CHECK_EQ(backward_.size(), 1) << "Only one backward function needed"; + CHECK_EQ(backward_.size(), 1UL) << "Only one backward function needed"; const auto outG = this->getOutputGrad(); const auto outV = this->getOutputValue();
diff --git a/paddle/gserver/layers/CosSimVecMatLayer.cpp b/paddle/gserver/layers/CosSimVecMatLayer.cpp index aabafd473aa1e06a767d48d4c49b7b8662e992e7..0f887d8adfa053e8fe88ac4fa4e2a9ba08ac07b5 100644 --- a/paddle/gserver/layers/CosSimVecMatLayer.cpp +++ b/paddle/gserver/layers/CosSimVecMatLayer.cpp
@@ -112,7 +112,7 @@ bool CosSimVecMatLayer::init(const LayerMap& layerMap, void CosSimVecMatLayer::forward(PassType passType) { Layer::forward(passType); - CHECK_EQ(forward_.size(), 1) << "Only one forward function needed"; + CHECK_EQ(forward_.size(), 1UL) << "Only one forward function needed"; MatrixPtr inV0 = getInputValue(0); MatrixPtr inV1 = getInputValue(1);
@@ -145,7 +145,7 @@ void CosSimVecMatLayer::forward(PassType passType) { } void CosSimVecMatLayer::backward(const UpdateCallback& callback) { - CHECK_EQ(backward_.size(), 1) << "Only one forward function needed"; + CHECK_EQ(backward_.size(), 1UL) << "Only one backward function needed"; MatrixPtr inV0 = getInputValue(0); MatrixPtr inV1 = getInputValue(1);
diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index 998b8d7d3034cb18fbab242c66656092bfc50fcb..4ae5b828707eb8412e98cbefcf3949d62e81ad1e 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp
@@ -192,6 +192,59 @@ void SumOfSquaresCostLayer::backwardImp(Matrix& output, outputG.sumOfSquaresBp(output, *label.value); } +// +// class SmoothL1CostLayer +// + +REGISTER_LAYER(smooth_l1, SmoothL1CostLayer); + +bool SmoothL1CostLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + return CostLayer::init(layerMap, parameterMap); +} +
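Before the forwardImp implementation, a sketch of the elementwise loss and gradient that the smoothL1()/smoothL1Bp() matrix kernels used below are expected to compute (a hedged illustration; the actual kernels live in the Matrix classes and are not shown in this diff):

#include <cmath>

// Elementwise smooth L1 loss: quadratic near zero, linear in the tails.
float smoothL1(float pred, float label) {
  float d = pred - label;
  return std::fabs(d) < 1.0f ? 0.5f * d * d : std::fabs(d) - 0.5f;
}

// Its derivative with respect to pred: the residual, clipped to [-1, 1].
float smoothL1Grad(float pred, float label) {
  float d = pred - label;
  if (d < -1.0f) return -1.0f;
  if (d > 1.0f) return 1.0f;
  return d;
}
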
+void SmoothL1CostLayer::forwardImp(Matrix& output, + Argument& label, + Matrix& target) { + MatrixPtr targetCpu, outputCpu, labelCpu; + if (useGpu_) { + targetCpu = + Matrix::create(target.getHeight(), target.getWidth(), false, false); + outputCpu = + Matrix::create(output.getHeight(), output.getWidth(), false, false); + labelCpu = Matrix::create( + label.value->getHeight(), label.value->getWidth(), false, false); + targetCpu->copyFrom(target); + outputCpu->copyFrom(output); + labelCpu->copyFrom(*label.value); + targetCpu->smoothL1(*outputCpu, *(labelCpu)); + target.copyFrom(*targetCpu); + } else { + target.smoothL1(output, *label.value); + } +} + +void SmoothL1CostLayer::backwardImp(Matrix& output, + Argument& label, + Matrix& outputG) { + MatrixPtr outputGCpu, outputCpu, labelCpu; + if (useGpu_) { + outputGCpu = + Matrix::create(outputG.getHeight(), outputG.getWidth(), false, false); + outputCpu = + Matrix::create(output.getHeight(), output.getWidth(), false, false); + labelCpu = Matrix::create( + label.value->getHeight(), label.value->getWidth(), false, false); + outputGCpu->copyFrom(outputG); + outputCpu->copyFrom(output); + labelCpu->copyFrom(*label.value); + outputGCpu->smoothL1Bp(*outputCpu, *labelCpu); + outputG.copyFrom(*outputGCpu); + } else { + outputG.smoothL1Bp(output, *label.value); + } +} + // // class RankingCost //
diff --git a/paddle/gserver/layers/CostLayer.h b/paddle/gserver/layers/CostLayer.h index b3045e0b31308abf2caa90cbd21f105e685ef341..569a6840f0d4432cc827219f590b821df115c7ea 100644 --- a/paddle/gserver/layers/CostLayer.h +++ b/paddle/gserver/layers/CostLayer.h
@@ -159,6 +159,29 @@ public: Matrix& outputGrad) override; }; +/** + * This cost layer computes the smooth L1 loss for real-valued regression + * tasks. + * \f[ + * L = \begin{cases} + * 0.5 (output - label)^2, & \text{if } |output - label| < 1 \\ + * |output - label| - 0.5, & \text{otherwise} + * \end{cases} + * \f] + */ +class SmoothL1CostLayer : public CostLayer { +public: + explicit SmoothL1CostLayer(const LayerConfig& config) : CostLayer(config) {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forwardImp(Matrix& output, Argument& label, Matrix& cost) override; + + void backwardImp(Matrix& outputValue, + Argument& label, + Matrix& outputGrad) override; +}; + /** * A cost layer for learning to rank (LTR) task. This layer contains at least * three inputs.
diff --git a/paddle/gserver/layers/CrossChannelNormLayer.cpp b/paddle/gserver/layers/CrossChannelNormLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3fbccc11032caa4878ce8dcfe7c34a261acee68b --- /dev/null +++ b/paddle/gserver/layers/CrossChannelNormLayer.cpp
@@ -0,0 +1,122 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
 */ + +#include "Layer.h" +#include "NormLayer.h" +#include "paddle/math/BaseMatrix.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +MatrixPtr CrossChannelNormLayer::createSampleMatrix(MatrixPtr data, + size_t iter, + size_t spatialDim) { + return Matrix::create(data->getData() + iter * channels_ * spatialDim, + channels_, + spatialDim, + false, + useGpu_); +} + +MatrixPtr CrossChannelNormLayer::createSpatialMatrix(MatrixPtr data, + size_t iter, + size_t spatialDim) { + return Matrix::create( + data->getData() + iter * spatialDim, 1, spatialDim, false, useGpu_); +} + +void CrossChannelNormLayer::forward(PassType passType) { + Layer::forward(passType); + MatrixPtr inV = getInputValue(0); + + size_t batchSize = inV->getHeight(); + size_t dataDim = inV->getWidth(); + CHECK_EQ(getSize(), dataDim); + + reserveOutput(batchSize, dataDim); + MatrixPtr outV = getOutputValue(); + size_t spatialDim = dataDim / channels_; + + Matrix::resizeOrCreate(dataBuffer_, batchSize, dataDim, false, useGpu_); + Matrix::resizeOrCreate(spatialBuffer_, 1, spatialDim, false, useGpu_); + Matrix::resizeOrCreate(normBuffer_, batchSize, spatialDim, false, useGpu_); + normBuffer_->zeroMem(); + // add a small epsilon to avoid division by zero + normBuffer_->addScalar(*normBuffer_, 1e-6); + inV->square2(*dataBuffer_); + for (size_t i = 0; i < batchSize; i++) { + const MatrixPtr inVTmp = createSampleMatrix(inV, i, spatialDim); + const MatrixPtr dataTmp = createSampleMatrix(dataBuffer_, i, spatialDim); + MatrixPtr outVTmp = createSampleMatrix(outV, i, spatialDim); + MatrixPtr normTmp = createSpatialMatrix(normBuffer_, i, spatialDim); + + // compute norm. + spatialBuffer_->sumCols(*dataTmp, 1, 0); + spatialBuffer_->sqrt2(*spatialBuffer_); + normTmp->copyFrom(*spatialBuffer_); + outVTmp->copyFrom(*inVTmp); + outVTmp->divRowVector(*spatialBuffer_); + // apply the per-channel scale.
+ outVTmp->mulColVector(*scale_->getW()); + } +} + +void CrossChannelNormLayer::backward(const UpdateCallback& callback) { + MatrixPtr inG = getInputGrad(0); + MatrixPtr inV = getInputValue(0); + MatrixPtr outG = getOutputGrad(); + MatrixPtr outV = getOutputValue(); + + size_t batchSize = inG->getHeight(); + size_t dataDim = inG->getWidth(); + size_t spatialDim = dataDim / channels_; + + dataBuffer_->dotMul(*outG, *outV); + Matrix::resizeOrCreate(scaleDiff_, channels_, 1, false, useGpu_); + Matrix::resizeOrCreate(channelBuffer_, channels_, 1, false, useGpu_); + Matrix::resizeOrCreate(sampleBuffer_, channels_, spatialDim, false, useGpu_); + scaleDiff_->zeroMem(); + for (size_t i = 0; i < batchSize; i++) { + MatrixPtr outGTmp = createSampleMatrix(outG, i, spatialDim); + const MatrixPtr dataTmp = createSampleMatrix(dataBuffer_, i, spatialDim); + const MatrixPtr inVTmp = createSampleMatrix(inV, i, spatialDim); + const MatrixPtr inGTmp = createSampleMatrix(inG, i, spatialDim); + const MatrixPtr normTmp = createSpatialMatrix(normBuffer_, i, spatialDim); + + channelBuffer_->sumRows(*dataTmp, 1, 0); + channelBuffer_->dotDiv(*channelBuffer_, *(scale_->getW())); + // temporarily store a / scale[i] in scaleDiff_ + scaleDiff_->add(*channelBuffer_, 1.); + + sampleBuffer_->dotMul(*inVTmp, *outGTmp); + spatialBuffer_->sumCols(*sampleBuffer_, 1., 1.); + // scale the grad + inGTmp->copyFrom(*inVTmp); + inGTmp->mulRowVector(*spatialBuffer_); + // divide by square of norm + spatialBuffer_->dotMul(*normTmp, *normTmp); + inGTmp->divRowVector(*spatialBuffer_); + // subtract + inGTmp->add(*outGTmp, -1, 1); + // divide by norm + inGTmp->divRowVector(*normTmp); + // scale the diff + inGTmp->mulColVector(*scale_->getW()); + } + // update scale + if (scale_->getWGrad()) scale_->getWGrad()->copyFrom(*scaleDiff_); + scale_->getParameterPtr()->incUpdate(callback); +} + +} // namespace paddle
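In scalar form, the forward pass above normalizes each spatial position's channel vector by its L2 norm and rescales it with the learned per-channel factors. A minimal sketch for one sample laid out as [channels x spatialDim] (illustrative only; the epsilon placement is an assumption and the real code works on Matrix buffers):

#include <cmath>
#include <vector>

void crossChannelNorm(const std::vector<std::vector<float>>& x,  // [C][HW]
                      const std::vector<float>& scale,           // [C]
                      std::vector<std::vector<float>>& y) {
  const float eps = 1e-6f;  // guards against an all-zero channel vector
  const size_t C = x.size(), HW = x[0].size();
  y.assign(C, std::vector<float>(HW, 0.0f));
  for (size_t p = 0; p < HW; ++p) {
    float sumSq = eps;
    for (size_t c = 0; c < C; ++c) sumSq += x[c][p] * x[c][p];
    const float norm = std::sqrt(sumSq);
    for (size_t c = 0; c < C; ++c) y[c][p] = scale[c] * x[c][p] / norm;
  }
}
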
diff --git a/paddle/gserver/layers/CudnnConvLayer.cpp b/paddle/gserver/layers/CudnnConvBaseLayer.cpp similarity index 66% rename from paddle/gserver/layers/CudnnConvLayer.cpp rename to paddle/gserver/layers/CudnnConvBaseLayer.cpp index 978c2c1479c64ab2cdebaaff7394059b3d033ab6..24363bb8b09cc354c25abe512257be68566c10e1 100644 --- a/paddle/gserver/layers/CudnnConvLayer.cpp +++ b/paddle/gserver/layers/CudnnConvBaseLayer.cpp
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "CudnnConvLayer.h" +#include "CudnnConvBaseLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { +REGISTER_LAYER(cudnn_conv, CudnnConvBaseLayer); +REGISTER_LAYER(cudnn_convt, CudnnConvBaseLayer); -REGISTER_LAYER(cudnn_conv, CudnnConvLayer); - -bool CudnnConvLayer::init(const LayerMap &layerMap, - const ParameterMap &parameterMap) { +bool CudnnConvBaseLayer::init(const LayerMap &layerMap, + const ParameterMap &parameterMap) { if (!ConvBaseLayer::init(layerMap, parameterMap)) return false; CHECK(useGpu_) << "CudnnConvLayer only support gpu";
@@ -33,7 +33,11 @@ bool CudnnConvLayer::init(const LayerMap &layerMap, CHECK(config_.shared_biases()); for (size_t i = 0; i < inputLayers_.size(); i++) { ProjectionConfig *conf = new ProjectionConfig(); - conf->set_type("conv"); + if (isDeconv_) { + conf->set_type("convt"); + } else { + conf->set_type("conv"); + } conf->set_num_filters(numFilters_); ConvConfig *convConf = conf->mutable_conv_conf(); *convConf = *(config_.mutable_inputs(i)->mutable_conv_conf());
@@ -47,14 +51,13 @@ bool CudnnConvLayer::init(const LayerMap &layerMap, if (biases_.get() && sharedBiases_) { hl_create_tensor_descriptor(&biasDesc_); hl_create_tensor_descriptor(&outputDesc_); - hl_tensor_reshape(biasDesc_, 1, numFilters_ / groups_[0], 1, 1); - biasOffset_ = numFilters_ / groups_[0]; + hl_tensor_reshape(biasDesc_, 1, numFilters_, 1, 1); } return true; } -void CudnnConvLayer::forward(PassType passType) { +void CudnnConvBaseLayer::forward(PassType passType) { Layer::forward(passType); int batchSize = getInput(0).getBatchSize();
@@ -67,37 +70,41 @@ void CudnnConvLayer::forward(PassType passType) { if (biases_) { REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str()); int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); + int outH, outW; + if (isDeconv_) { + outH = imgSizeH_[0]; + outW = imgSizeW_[0]; + } else { + outH = outputH_[0]; + outW = outputW_[0]; + } + hl_tensor_reshape(outputDesc_, batchSize, - numFilters_ / groups_[0], - outputH_[0], - outputW_[0], - numFilters_ * outputH_[0] * outputW_[0], - outputH_[0] * outputW_[0], - outputW_[0], + numFilters_, + outH, + outW, + numFilters_ * outH * outW, + outH * outW, + outW, 1); - outputOffset_ = getOutputValue()->getWidth() / groups_[0]; - for (int g = 0; g < groups_[0]; ++g) { - real *biasData = biases_->getW()->getData() + biasOffset_ * g; - real *outData = getOutputValue()->getData() + outputOffset_ * g; - hl_convolution_forward_add_bias( - biasDesc_, biasData, outputDesc_, outData); - } + real *outData = getOutputValue()->getData(); + real *biasData = biases_->getW()->getData(); + hl_convolution_forward_add_bias(biasDesc_, biasData, outputDesc_, outData); } forwardActivation(); } -void CudnnConvLayer::backward(const UpdateCallback &callback) { +void CudnnConvBaseLayer::backward(const UpdateCallback &callback) { backwardActivation(); if (biases_ && biases_->getWGrad()) { REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str()); - for (int g = 0; g < groups_[0]; ++g) { - real *biasGrad = biases_->getWGrad()->getData() + biasOffset_ * g; - real *outGrad = getOutputGrad()->getData() + outputOffset_ * g; - hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad); - } + real *biasGrad = biases_->getWGrad()->getData(); + real *outGrad = getOutputGrad()->getData(); + hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad); + biases_->getParameterPtr()->incUpdate(callback); }
@@ -106,7 +113,7 @@ void CudnnConvLayer::backward(const
 UpdateCallback &callback) { } } -CudnnConvLayer::~CudnnConvLayer() { +CudnnConvBaseLayer::~CudnnConvBaseLayer() { if (biases_) { hl_destroy_tensor_descriptor(biasDesc_); hl_destroy_tensor_descriptor(outputDesc_);
diff --git a/paddle/gserver/layers/CudnnConvLayer.h b/paddle/gserver/layers/CudnnConvBaseLayer.h similarity index 86% rename from paddle/gserver/layers/CudnnConvLayer.h rename to paddle/gserver/layers/CudnnConvBaseLayer.h index 919b1efc4e453219a6c2ab1a11c61ccb99404084..93a05f94c7717f9170818b9d5ce3d27a6d18cef5 100644 --- a/paddle/gserver/layers/CudnnConvLayer.h +++ b/paddle/gserver/layers/CudnnConvBaseLayer.h
@@ -30,27 +30,24 @@ namespace paddle { * * The config file api is img_conv_layer. */ -class CudnnConvLayer : public ConvBaseLayer { +class CudnnConvBaseLayer : public ConvBaseLayer { protected: std::vector<std::unique_ptr<ProjectionConfig>> projConf_; std::vector<std::unique_ptr<Projection>> projections_; hl_tensor_descriptor biasDesc_; hl_tensor_descriptor outputDesc_; - int biasOffset_; - int outputOffset_; public: - explicit CudnnConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {} + explicit CudnnConvBaseLayer(const LayerConfig& config) + : ConvBaseLayer(config) {} - ~CudnnConvLayer(); + ~CudnnConvBaseLayer(); + void forward(PassType passType) override; + void backward(const UpdateCallback& callback) override; bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; - void forward(PassType passType) override; - void backward(const UpdateCallback& callback) override; - void addBiases(); - void bpropBiases(); }; } // namespace paddle
diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp index 9ddccc202705c024076db795a9aeda0c823e9399..fdcf994cdb47f2409b045a1337332e2f4c304fbc 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp
@@ -107,6 +107,10 @@ void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image, int channel = isDeconv_ ? numFilters_ : channels_[inIdx]; resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); + + CHECK_EQ(image->getWidth(), + static_cast<size_t>(imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel)); + real *imgData = image->getData() + startIdx * image->getWidth(); MatrixPtr imageTmp = Matrix::create(imgData,
diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.h b/paddle/gserver/layers/HierarchicalSigmoidLayer.h index 3f6875fb9f007c0938bfcd7cad99c73b4ba1511b..9afd40b1674680da962d6e51caa56b46279b70de 100644 --- a/paddle/gserver/layers/HierarchicalSigmoidLayer.h +++ b/paddle/gserver/layers/HierarchicalSigmoidLayer.h
@@ -36,7 +36,7 @@ namespace paddle { * | |- 5 * | * |-*- 0 - * |- 1 + * |- 1 * @endcode * * where * indicates an internal node, and each leaf node represents a class.
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index f76d41ad3e8a3b1730f9d50c0773ee4f61ddb541..125aaf947f3c9d976b117667d1d1b7700a029cc6 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -381,8 +381,7 @@ void Layer::backwardActivation() { void Layer::forwardDropOut() { auto& outV = getOutputValue(); - if (passType_ == PASS_TRAIN || passType_ == PASS_METRIC_TRAIN || - passType_ == PASS_METRIC_TRAIN_WITH_NOERROR) { + if (passType_ == PASS_TRAIN) { // new dropOutMask_ if dropOutMask_ is null ptr Matrix::resizeOrCreate(dropOutMask_, outV->getHeight(), diff --git a/paddle/gserver/layers/LinearChainCRF.cpp b/paddle/gserver/layers/LinearChainCRF.cpp index b7f748f3bb8a419429956724131e81dfdbd274c6..dc3dc156792bdf32c3b948a292597d0e9eca5d8b 100644 --- a/paddle/gserver/layers/LinearChainCRF.cpp +++ b/paddle/gserver/layers/LinearChainCRF.cpp @@ -17,18 +17,12 @@ limitations under the License. */ namespace paddle { -LinearChainCRF::LinearChainCRF(int numClasses, real* para, real* grad) +LinearChainCRF::LinearChainCRF(int numClasses, real* para) : numClasses_(numClasses) { a_ = Matrix::create(para, 1, numClasses_); b_ = Matrix::create(para + numClasses_, 1, numClasses_); w_ = Matrix::create(para + 2 * numClasses_, numClasses_, numClasses_); - if (grad) { - da_ = Matrix::create(grad, 1, numClasses_); - db_ = Matrix::create(grad + numClasses_, 1, numClasses_); - dw_ = Matrix::create(grad + 2 * numClasses_, numClasses_, numClasses_); - } - ones_ = Matrix::create(1, numClasses_); ones_->one(); @@ -107,19 +101,24 @@ real LinearChainCRF::forward(real* x, int* s, int length) { return -ll; } -void LinearChainCRF::backward(real* x, real* dx, int* s, int length) { +void LinearChainCRF::backward(real* x, int* s, int length, bool needWGrad) { MatrixPtr matX = Matrix::create(x, length, numClasses_); - MatrixPtr matDX = Matrix::create(dx, length, numClasses_); - MatrixPtr matGrad = Matrix::create(length, numClasses_); + Matrix::resizeOrCreate(matGrad_, length, numClasses_); Matrix::resizeOrCreate(beta_, length, numClasses_); real* b = b_->getData(); - real* dw = dw_ ? 
dw_->getData() : nullptr; + if (needWGrad) { + Matrix::resizeOrCreate(matWGrad_, numClasses_ + 2, numClasses_); + matWGrad_->zeroMem(); + da_ = matWGrad_->subRowMatrix(0, 1); + db_ = matWGrad_->subRowMatrix(1, 2); + dw_ = matWGrad_->subRowMatrix(2, numClasses_ + 2); + } real* alpha = alpha_->getData(); real* beta = beta_->getData(); real* expW = expW_->getData(); real* expX = expX_->getData(); - real* grad = matGrad->getData(); + real* grad = matGrad_->getData(); for (int i = 0; i < numClasses_; ++i) { beta[(length - 1) * numClasses_ + i] = exp(b[i]);
@@ -140,39 +139,38 @@ normalizeL1(beta + k * numClasses_, numClasses_); } - matGrad->dotMul(*alpha_, *beta_); - matGrad->rowNormalizeL1(*matGrad); + matGrad_->dotMul(*alpha_, *beta_); + matGrad_->rowNormalizeL1(*matGrad_); for (int k = 0; k < length; ++k) { grad[k * numClasses_ + s[k]] -= (real)1; } - matDX->add(*matGrad); - if (da_) { - da_->add(*matGrad->subMatrix(/* startRow= */ 0, /* numRows= */ 1)); - } - if (db_) { - db_->add(*matGrad->subMatrix(/* startRow= */ length - 1, 1)); - } - beta_->dotMul(*beta_, *expX_); - beta_->rowNormalizeL1(*beta_); + if (needWGrad) { + da_->add(*matGrad_->subMatrix(/* startRow= */ 0, /* numRows= */ 1)); + db_->add(*matGrad_->subMatrix(/* startRow= */ length - 1, 1)); - for (int k = 1; dw && k < length; ++k) { - real sum = 0; - for (int i = 0; i < numClasses_; ++i) { - for (int j = 0; j < numClasses_; ++j) { - sum += expW[i * numClasses_ + j] * alpha[(k - 1) * numClasses_ + i] * - beta[k * numClasses_ + j]; + beta_->dotMul(*beta_, *expX_); + beta_->rowNormalizeL1(*beta_); + + real* dw = dw_->getData(); + for (int k = 1; k < length; ++k) { + real sum = 0; + for (int i = 0; i < numClasses_; ++i) { + for (int j = 0; j < numClasses_; ++j) { + sum += expW[i * numClasses_ + j] * alpha[(k - 1) * numClasses_ + i] * + beta[k * numClasses_ + j]; + } } - } - sum = 1 / sum; - for (int i = 0; i < numClasses_; ++i) { - for (int j = 0; j < numClasses_; ++j) { - dw[i * numClasses_ + j] += sum * expW[i * numClasses_ + j] * - alpha[(k - 1) * numClasses_ + i] * - beta[k * numClasses_ + j]; + sum = 1 / sum; + for (int i = 0; i < numClasses_; ++i) { + for (int j = 0; j < numClasses_; ++j) { + dw[i * numClasses_ + j] += sum * expW[i * numClasses_ + j] * + alpha[(k - 1) * numClasses_ + i] * + beta[k * numClasses_ + j]; + } } + dw[s[k - 1] * numClasses_ + s[k]] -= (real)1; } - dw[s[k - 1] * numClasses_ + s[k]] -= (real)1; } }
diff --git a/paddle/gserver/layers/LinearChainCRF.h b/paddle/gserver/layers/LinearChainCRF.h index a905bf803dd5443ef8d4ad7702720a50a5220a9a..8daf1e14a6fa98bef41f4f32bff439df8302adfd 100644 --- a/paddle/gserver/layers/LinearChainCRF.h +++ b/paddle/gserver/layers/LinearChainCRF.h
@@ -21,7 +21,7 @@ namespace paddle { class LinearChainCRF { public: /** - * The size of para and grad must be \f$(numClasses + 2) * numClasses\f$. + * The size of para must be \f$(numClasses + 2) * numClasses\f$. * The first numClasses values of para are for starting weights (\f$a\f$). * The next numClasses values of para are for ending weights (\f$b\f$), * The remaining values are for transition weights (\f$w\f$).
@@ -34,7 +34,7 @@ public: * all possible * sequences is \f$1\f$, and \f$x\f$ is the input feature to the CRF. */ - LinearChainCRF(int numClasses, real* para, real* grad); + LinearChainCRF(int numClasses, real* para); /** * Calculate the negative log likelihood of s given x.
@@ -45,29 +45,45 @@ public: /** * Calculate the gradient with respect to x, a, b, and w. - * The gradient of x will be stored in dx. * backward() can only be called after a corresponding call to forward() with * the same x, s and length. - * @note The gradient is added to dx and grad (provided at constructor). + * The gradient with respect to a, b, and w will not be calculated if + * needWGrad is false. + * @note Please call getWGrad() and getXGrad() to get the gradient with + * respect to (a, b, w) and x respectively. */ - void backward(real* x, real* dx, int* s, int length); + void backward(real* x, int* s, int length, bool needWGrad); /** * Find the most probable sequence given x. The result will be stored in s. */ void decode(real* x, int* s, int length); + /* + * Return the gradient with respect to (a, b, w). It can only be called after + * a corresponding call to backward(). + */ + MatrixPtr getWGrad() { return matWGrad_; } + + /* + * Return the gradient with respect to x. It can only be called after a + * corresponding call to backward(). + */ + MatrixPtr getXGrad() { return matGrad_; } + protected: int numClasses_; MatrixPtr a_; MatrixPtr b_; MatrixPtr w_; + MatrixPtr matWGrad_; MatrixPtr da_; MatrixPtr db_; MatrixPtr dw_; MatrixPtr ones_; MatrixPtr expX_; + MatrixPtr matGrad_; MatrixPtr alpha_; MatrixPtr beta_; MatrixPtr maxX_;
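The reworked interface is easiest to read as a usage sketch (hypothetical caller, assuming the paddle types and the signatures declared in this header):

// para holds (numClasses + 2) * numClasses weights: a, b, then w.
void crfUsageSketch(real* para, real* x, int* s, int length, int numClasses) {
  LinearChainCRF crf(numClasses, para);        // no gradient buffer any more
  real negLogLik = crf.forward(x, s, length);  // must precede backward()
  crf.backward(x, s, length, /* needWGrad */ true);
  MatrixPtr dx = crf.getXGrad();  // [length x numClasses], gradient w.r.t. x
  MatrixPtr dw = crf.getWGrad();  // [(numClasses + 2) x numClasses]
  (void)negLogLik;
  (void)dx;
  (void)dw;
}
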
diff --git a/paddle/gserver/layers/NormLayer.cpp b/paddle/gserver/layers/NormLayer.cpp index 3db0af2515ee9f64aa6c0b0a441e88562d9e398e..e094078bfe86e30c06e1b80ebc04c8213fe9abcf 100644 --- a/paddle/gserver/layers/NormLayer.cpp +++ b/paddle/gserver/layers/NormLayer.cpp
@@ -26,6 +26,8 @@ Layer* NormLayer::create(const LayerConfig& config) { return new ResponseNormLayer(config); } else if (norm == "cmrnorm-projection") { return new CMRProjectionNormLayer(config); + } else if (norm == "cross-channel-norm") { + return new CrossChannelNormLayer(config); } else { LOG(FATAL) << "Unknown norm type: " << norm; return nullptr;
@@ -54,4 +56,14 @@ bool ResponseNormLayer::init(const LayerMap& layerMap, return true; } +bool CrossChannelNormLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + Layer::init(layerMap, parameterMap); + CHECK(parameters_[0]); + const NormConfig& conf = config_.inputs(0).norm_conf(); + channels_ = conf.channels(); + scale_.reset(new Weight(channels_, 1, parameters_[0])); + return true; +} + } // namespace paddle
diff --git a/paddle/gserver/layers/NormLayer.h b/paddle/gserver/layers/NormLayer.h index e77faaa322570933b3ea2de877b7859857306432..7c238ac944e52c3a83c2aa5deac18de3aff6db61 100644 --- a/paddle/gserver/layers/NormLayer.h +++ b/paddle/gserver/layers/NormLayer.h
@@ -65,4 +65,35 @@ public: } }; +/** + * This layer applies normalization across the channels of each sample to a + * conv layer's output, and scales the output by a group of trainable factors + * whose dimension equals the number of channels. + * - Input: One and only one input layer is accepted. + * - Output: The normalized data of the input data. + * Reference: + * Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, + * Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector + */ +class CrossChannelNormLayer : public NormLayer { +public: + explicit CrossChannelNormLayer(const LayerConfig& config) + : NormLayer(config) {} + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + void forward(PassType passType); + void backward(const UpdateCallback& callback); + MatrixPtr createSampleMatrix(MatrixPtr data, size_t iter, size_t spatialDim); + MatrixPtr createSpatialMatrix(MatrixPtr data, size_t iter, size_t spatialDim); + +protected: + size_t channels_; + std::unique_ptr<Weight> scale_; + MatrixPtr scaleDiff_; + MatrixPtr normBuffer_; + MatrixPtr dataBuffer_; + MatrixPtr channelBuffer_; + MatrixPtr spatialBuffer_; + MatrixPtr sampleBuffer_; +}; + } // namespace paddle
diff --git a/paddle/gserver/layers/PadLayer.cpp b/paddle/gserver/layers/PadLayer.cpp index bb618c09f9777785d93995fa7140dd4a5383cd1b..a5ed7e057aea8f065ee752f8c0f0d2d9bdddfc8b 100644 --- a/paddle/gserver/layers/PadLayer.cpp +++ b/paddle/gserver/layers/PadLayer.cpp
@@ -36,12 +36,9 @@ bool PadLayer::init(const LayerMap& layerMap, CHECK_EQ(2, pad_conf.pad_c_size()); CHECK_EQ(2, pad_conf.pad_h_size()); CHECK_EQ(2, pad_conf.pad_w_size()); - padc_.push_back(pad_conf.pad_c(0)); - padc_.push_back(pad_conf.pad_c(1)); - padh_.push_back(pad_conf.pad_h(0)); - padh_.push_back(pad_conf.pad_h(1)); - padw_.push_back(pad_conf.pad_w(0)); - padw_.push_back(pad_conf.pad_w(1)); + padc_ = {pad_conf.pad_c(0), pad_conf.pad_c(1)}; + padh_ = {pad_conf.pad_h(0), pad_conf.pad_h(1)}; + padw_ = {pad_conf.pad_w(0), pad_conf.pad_w(1)}; outDims_ = TensorShape(4); setOutDims(0);
@@ -49,21 +46,15 @@ bool PadLayer::init(const LayerMap& layerMap, createFunction(forward_, "Pad", FuncConfig() - .set("cstart", padc_[0]) - .set("cend", padc_[1]) - .set("hstart", padh_[0]) - .set("hend", padh_[1]) - .set("wstart", padw_[0]) - .set("wend", padw_[1])); + .set("channel", padc_) + .set("height", padh_) + .set("width", padw_)); createFunction(backward_, "PadGrad", FuncConfig() - .set("cstart", padc_[0]) - .set("cend", padc_[1]) - .set("hstart", padh_[0]) - .set("hend", padh_[1]) - .set("wstart", padw_[0]) - .set("wend", padw_[1])); + .set("channel", padc_) + .set("height", padh_) + .set("width", padw_)); return true; }
diff --git a/paddle/gserver/layers/PadLayer.h b/paddle/gserver/layers/PadLayer.h index b2bbf28082e630aeb429ee997a1d43ce7ba05d1c..fe9388d8cc260ed599af0113361f4687f3f4a18b 100644 --- a/paddle/gserver/layers/PadLayer.h +++ b/paddle/gserver/layers/PadLayer.h
@@ -38,9 +38,9 @@ protected: void setOutDims(const size_t batchSize); void setTensorDim(const size_t batchSize); - std::vector<int> padc_; - std::vector<int> padh_; - std::vector<int> padw_; + std::vector<uint32_t> padc_; + std::vector<uint32_t> padh_; + std::vector<uint32_t> padw_; TensorShape inDims_; TensorShape outDims_; };
diff --git a/paddle/gserver/layers/PriorBox.cpp b/paddle/gserver/layers/PriorBox.cpp index bcf5e912a50fef2cec8ebdf1e0dad9efa43fba2f..331bc7672ec0d39a7317c39f1d14e8dcadea471a 100644 --- a/paddle/gserver/layers/PriorBox.cpp +++ b/paddle/gserver/layers/PriorBox.cpp
@@ -20,7 +20,7 @@ namespace paddle { /** * @brief A layer for generating priorbox locations and variances. * - Input: Two and only two input layers are accepted. The input layers must - * be a data output layer and a convolution output layer. + * be a data output layer and a convolution output layer. * - Output: The priorbox locations and variances of the input data.
 * Reference: * Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
@@ -45,27 +45,32 @@ protected: MatrixPtr buffer_; }; +REGISTER_LAYER(priorbox, PriorBoxLayer); + bool PriorBoxLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { Layer::init(layerMap, parameterMap); auto pbConf = config_.inputs(0).priorbox_conf(); + std::vector<real> tmp; + aspectRatio_.push_back(1.); std::copy(pbConf.min_size().begin(), pbConf.min_size().end(), std::back_inserter(minSize_)); std::copy(pbConf.max_size().begin(), pbConf.max_size().end(), std::back_inserter(maxSize_)); - std::copy(pbConf.aspect_ratio().begin(), - pbConf.aspect_ratio().end(), - std::back_inserter(aspectRatio_)); std::copy(pbConf.variance().begin(), pbConf.variance().end(), std::back_inserter(variance_)); + std::copy(pbConf.aspect_ratio().begin(), + pbConf.aspect_ratio().end(), + std::back_inserter(tmp)); // flip - int inputRatioLength = aspectRatio_.size(); - for (int index = 0; index < inputRatioLength; index++) - aspectRatio_.push_back(1 / aspectRatio_[index]); - aspectRatio_.push_back(1.); + int inputRatioLength = tmp.size(); + for (int index = 0; index < inputRatioLength; index++) { + aspectRatio_.push_back(tmp[index]); + aspectRatio_.push_back(1 / tmp[index]); + } numPriors_ = aspectRatio_.size(); if (maxSize_.size() > 0) numPriors_++; return true;
@@ -94,12 +99,12 @@ void PriorBoxLayer::forward(PassType passType) { for (int w = 0; w < layerWidth; ++w) { real centerX = (w + 0.5) * stepW; real centerY = (h + 0.5) * stepH; - int minSize = 0; + real minSize = 0; for (size_t s = 0; s < minSize_.size(); s++) { // first prior. minSize = minSize_[s]; - int boxWidth = minSize; - int boxHeight = minSize; + real boxWidth = minSize; + real boxHeight = minSize; // xmin, ymin, xmax, ymax. tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth; tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
@@ -112,7 +117,7 @@ void PriorBoxLayer::forward(PassType passType) { CHECK_EQ(minSize_.size(), maxSize_.size()); // second prior. for (size_t s = 0; s < maxSize_.size(); s++) { - int maxSize = maxSize_[s]; + real maxSize = maxSize_[s]; boxWidth = boxHeight = sqrt(minSize * maxSize); tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth; tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight;
@@ -145,6 +150,5 @@ void PriorBoxLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); outV->copyFrom(buffer_->data_, dim * 2); } -REGISTER_LAYER(priorbox, PriorBoxLayer); } // namespace paddle
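The reworked init() above always places aspect ratio 1 first and then appends each configured ratio together with its flipped counterpart, so the number of priors per location follows directly. A small sketch of that count (illustrative helper, not part of the patch):

#include <vector>

size_t countPriors(const std::vector<float>& configuredRatios,
                   bool hasMaxSize) {
  std::vector<float> ratios{1.0f};  // ratio 1 is always the first prior
  for (float r : configuredRatios) {
    ratios.push_back(r);         // the configured ratio
    ratios.push_back(1.0f / r);  // and its flipped counterpart
  }
  return ratios.size() + (hasMaxSize ? 1 : 0);  // one extra prior for max_size
}
// e.g. ratios {2, 3} with max_size set: 1 + 2 * 2 + 1 = 6 priors per location.
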
diff --git a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp index 7a13cd7ad0fecf202613d8da365ea832b41ab04e..944c7051668dccf39dd2ace14986d43c8a14e452 100644 --- a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp +++ b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp
@@ -25,6 +25,11 @@ namespace paddle { * Input: a sequence * If SequenceLevel = kNonseq: * Output: a sequence containing only the last instance of the input sequence + * If stride_ > 0: + * Output: a shortened sequence. The operation of taking the last instance + * of a sequence is performed independently on every slice of the + * input sequence, which is obtained by sliding a window with the + * window size set to stride_. * If SequenceLevel = kSeq: * Check that the input sequence has sub-sequences * Output: a sequence containing only the last instance of each sub-sequence
@@ -37,6 +42,7 @@ class SequenceLastInstanceLayer : public SequencePoolLayer { protected: MatrixPtr tmpSrc_; MatrixPtr tmpDest_; + std::vector<int> instanceIds_; public: explicit SequenceLastInstanceLayer(const LayerConfig& config)
@@ -54,6 +60,7 @@ REGISTER_LAYER(seqlastins, SequenceLastInstanceLayer); bool SequenceLastInstanceLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { SequencePoolLayer::init(layerMap, parameterMap); + reversed_ = config_.select_first(); tmpSrc_ = Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, useGpu_);
@@ -66,7 +73,8 @@ bool SequenceLastInstanceLayer::init(const LayerMap& layerMap, void SequenceLastInstanceLayer::forward(PassType passType) { SequencePoolLayer::forward(passType); - const int* starts = startPositions_->getData(false); + auto starts = (stride_ > 0) ? stridePositions_->getData() : startPositions_->getData(false); MatrixPtr inputValue = getInputValue(0); MatrixPtr outputValue = getOutputValue();
@@ -74,9 +82,10 @@ AsyncGpuBlock asyncGpuBlock; REGISTER_TIMER_INFO("SequenceLastInstanceLayerForward", getName().c_str()); + instanceIds_.clear(); for (size_t seqId = 0; seqId < newBatchSize_; ++seqId) { - int insId = - config_.select_first() ? starts[seqId] : starts[seqId + 1] - 1; + int insId = reversed_ ? starts[seqId] : starts[seqId + 1] - 1; + instanceIds_.push_back(insId); outputValue->subMatrix(seqId, 1, tmpDest_) ->assign(*(inputValue->subMatrix(insId, 1, tmpSrc_)));
@@ -96,18 +105,13 @@ void SequenceLastInstanceLayer::backward(const UpdateCallback& callback) { MatrixPtr inputGrad = getInputGrad(0); MatrixPtr outputGrad = getOutputGrad(); - const int* starts = startPositions_->getData(false); - size_t numSequences = startPositions_->getSize() - 1; if (inputGrad) { AsyncGpuBlock asyncGpuBlock; REGISTER_TIMER_INFO("SequenceLastInstanceLayerBackward", getName().c_str()); - for (size_t seqId = 0; seqId < numSequences; ++seqId) { - int insId = - config_.select_first() ? starts[seqId] : starts[seqId + 1] - 1; - - inputGrad->subMatrix(insId, 1, tmpDest_) + for (size_t seqId = 0; seqId < newBatchSize_; ++seqId) { + inputGrad->subMatrix(instanceIds_[seqId], 1, tmpDest_) ->add(*(outputGrad->subMatrix(seqId, 1, tmpSrc_))); } }
diff --git a/paddle/gserver/layers/SequencePoolLayer.cpp b/paddle/gserver/layers/SequencePoolLayer.cpp index 35260ca912d5d0e00213ffb7074bd8963da265da..8c49502011582b534a2ba4113ffeffaa2f06a51c 100644 --- a/paddle/gserver/layers/SequencePoolLayer.cpp +++ b/paddle/gserver/layers/SequencePoolLayer.cpp
@@ -37,6 +37,7 @@ bool SequencePoolLayer::init(const LayerMap& layerMap, } else { LOG(FATAL) << "Unknown trans_type: " << config_.trans_type(); } + stride_ = config_.seq_pool_stride(); setNeedSequenceInfo(false); return true; }
@@ -55,19 +56,25 @@ void SequencePoolLayer::forward(PassType passType) { CHECK_EQ(starts->getData()[newBatchSize_], input.getBatchSize()); CHECK_EQ(newBatchSize_, starts->getSize() - 1); - resetOutput(newBatchSize_, dim); - if (type_) { - CHECK(input.subSequenceStartPositions) - << "when trans_type = seq, input must hasSubseq"; - } /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq, * thus, in this case, output_ has no sequenceStartPositions.
 * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this * case, we should compute the new sequenceStartPositions. */ if (type_) { - output_.degradeSequence(input, useGpu_); + CHECK(input.subSequenceStartPositions) + << "when trans_type = seq, input must hasSubseq"; + output_.degradeSequence(input); + } + if (stride_ > 0) { + CHECK_EQ(input.hasSubseq(), 0UL) + << "sequence stride pooling is invalid for hasSubseq now"; + output_.poolSequenceWithStride( + input, stride_, &stridePositions_, reversed_); + newBatchSize_ = stridePositions_->getSize() - 1; } + + resetOutput(newBatchSize_, dim); } void SequencePoolLayer::backward(const UpdateCallback& callback) {
diff --git a/paddle/gserver/layers/SequencePoolLayer.h b/paddle/gserver/layers/SequencePoolLayer.h index 85b51ccd1dc7e7eb7aa9344b0f7ec6f70a35a0b4..293d1bf27823ffb0ebddba95461883d646f159ae 100644 --- a/paddle/gserver/layers/SequencePoolLayer.h +++ b/paddle/gserver/layers/SequencePoolLayer.h
@@ -26,6 +26,10 @@ namespace paddle { * Output: output size is the number of input sequences (NOT input instances) * output[i] = seqlastin/average/max_{for each instance in this * sequence}{input[i]} + * If stride_ > 0: + * Check that the input sequence has no sub-sequences + * Output: a shortened sequence; pooling is performed over a small local + * area * If SequenceLevel = kSeq: * Check that the input sequence has sub-sequences * Output: output size is the number of input sub-sequences
@@ -42,6 +46,11 @@ protected: enum SequenceLevel { kNonSeq = 0, kSeq = 1 }; size_t newBatchSize_; ICpuGpuVectorPtr startPositions_; + int stride_; + // Store the start position of each window. + IVectorPtr stridePositions_; + // Whether the input sequence is reversed or not. + bool reversed_ = false; public: explicit SequencePoolLayer(const LayerConfig& config) : Layer(config) {}
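One plausible reading of the stride pooling introduced above (the poolSequenceWithStride() implementation is not part of this diff): each sequence is cut into windows of stride_ instances and pooling runs per window, so the window start positions for one sequence look like this sketch:

#include <vector>

// Window start positions for a sequence [begin, end) with the given stride;
// each window covers [pos, min(pos + stride, end)). Illustrative only.
std::vector<int> strideWindowStarts(int begin, int end, int stride) {
  std::vector<int> starts;
  for (int pos = begin; pos < end; pos += stride) starts.push_back(pos);
  return starts;
}
// A sequence of length 10 with stride 4 gives starts {0, 4, 8}: the pooled
// output is a shortened sequence of length 3.
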
diff --git a/paddle/gserver/layers/TransLayer.cpp b/paddle/gserver/layers/TransLayer.cpp index d1fa90f38415c53bd1c56df4a6c4be0508004bc6..4150f1727d8a1a3c1ed21b01944040977d2db315 100644 --- a/paddle/gserver/layers/TransLayer.cpp +++ b/paddle/gserver/layers/TransLayer.cpp
@@ -56,7 +56,14 @@ void TransLayer::backward(const UpdateCallback& callback) { return; } MatrixPtr preGrad = getInputGrad(0); - outputGrad->transpose(preGrad, false); + if (preGrad) { + MatrixPtr transGrad = Matrix::create(preGrad->getHeight(), + preGrad->getWidth(), + /* trans= */ false, + preGrad->useGpu()); + outputGrad->transpose(transGrad, false); + preGrad->add(*transGrad); + } } } // namespace paddle
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index 0caa5e1e11e6d42fadfa87149814c4b77b3b6271..3c4128b5b8a0ea420bd3027b9a36e5f75087c3cb 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt
@@ -18,6 +18,14 @@ add_unittest_without_exec(test_LayerGrad add_test(NAME test_LayerGrad COMMAND test_LayerGrad) +################ test_CRFLayerGrad #################### +add_unittest_without_exec(test_CRFLayerGrad + test_CRFLayerGrad.cpp + LayerGradUtil.cpp) +add_test(NAME test_CRFLayerGrad + COMMAND test_CRFLayerGrad) + + add_unittest_without_exec(test_ActivationGrad test_ActivationGrad.cpp LayerGradUtil.cpp)
diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index ae016e74eaa84f7c43a30c09c8c4577e25360c4e..a0b1cd471dd02fd20bb2247395bdb74651610bbf 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp
@@ -24,7 +24,7 @@ real getCostSum(LayerPtr& testLayer, MatrixPtr weights) { if (weights) { outArgs[0].value->dotMul(*outArgs[0].value, *weights); } - return Argument::sumCosts(outArgs); + return Argument::sum(outArgs); } real getDiffAndPrint(real newCost1,
@@ -241,7 +241,7 @@ void testBatchState(LayerPtr testLayer, std::vector<Argument> args; args.push_back(out); - EXPECT_EQ(0, Argument::sumCosts(args)) << "testBatchState failed"; + EXPECT_EQ(0, Argument::sum(args)) << "testBatchState failed"; for (size_t seqId = 0; seqId < numSequences; ++seqId) { start[seqId] += seqLens[seqId]; }
@@ -672,7 +672,7 @@ void testLayerGradKernel(TestConfig testConf, outArgs[0].value->dotMul(*testLayer->getOutput().value, *weights); } - real cost = Argument::sumCosts(outArgs); + real cost = Argument::sum(outArgs); LOG(INFO) << " cost " << cost; EXPECT_FALSE(std::isnan(cost));
@@ -778,8 +778,10 @@ void testProjectionGrad(ProjectionConfig conf, config.biasSize = biasSize == 0 ? config.layerConfig.size() : biasSize; config.layerConfig.set_bias_size(config.biasSize); config.layerConfig.set_shared_biases(sharedBias); - config.inputDefs.push_back( - {inputType, "layer_0", conf.input_size(), parameterSize}); + config.inputDefs.push_back({inputType, + "layer_0", + static_cast<size_t>(conf.input_size()), + parameterSize}); *config.layerConfig.add_inputs()->mutable_proj_conf() = conf; config.testState = testState; testLayerGrad(config, "mixed", batchSize, false, useGpu);
diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df14449291e9ec08f45718de07bbb101f6dbea58 --- /dev/null +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp
@@ -0,0 +1,174 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
 */ + +#include <gtest/gtest.h> +#include "ModelConfig.pb.h" +#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/gserver/layers/LinearChainCRF.h" +#include "paddle/trainer/Trainer.h" + +#include "LayerGradUtil.h" +#include "paddle/testing/TestUtil.h" + +using namespace paddle; // NOLINT + +DECLARE_int32(gpu_id); +DECLARE_bool(thread_local_rand_use_global_seed); + +static inline bool getNextSequence(std::vector<int>& seq, int numClasses) { + for (auto& v : seq) { + if (++v < numClasses) { + return true; + } + v = 0; + } + return false; +} + +// log(exp(x) + exp(y)) +static inline real logSum(real x, real y) { + real maxValue = std::max(x, y); + if (std::isinf(maxValue)) { + return -std::numeric_limits<real>::infinity(); + } else { + return maxValue + log(exp(x - maxValue) + exp(y - maxValue)); + } +} + +static inline std::vector<int> genRandLabels(int numClasses, int length) { + std::vector<int> labels(length); + for (int i = 0; i < length; ++i) { + labels[i] = rand() % numClasses; // NOLINT + } + return labels; +} + +TEST(CRFLayer, cost) { + const int numClasses = 4; + CpuVector para(numClasses * (numClasses + 2)); + real* a = para.getData(); + real* b = para.getData() + numClasses; + real* w = para.getData() + 2 * numClasses; + LinearChainCRF crf(4, para.getData()); + for (int length : {1, 2, 3, 10}) { + for (int tries = 0; tries < 10; ++tries) { + CpuMatrix x(length, numClasses); + x.randomizeUniform(); + para.randnorm(0, 2); + + std::vector<int> goldenLabels = genRandLabels(numClasses, length); + + real cost = crf.forward(x.getData(), goldenLabels.data(), length); + + real logZ = -std::numeric_limits<real>::infinity(); + real logNominator = -std::numeric_limits<real>::infinity(); + std::vector<int> testResult(length, 0); + do { + real score = a[testResult.front()]; + score += x.getElement(0, testResult.front()); + for (int k = 1; k < length; ++k) { + score += x.getElement(k, testResult[k]) + + w[numClasses * testResult[k - 1] + testResult[k]]; + } + score += b[testResult.back()]; + logZ = logSum(logZ, score); + + if (goldenLabels == testResult) { + logNominator = score; + } + } while (getNextSequence(testResult, numClasses)); + + real trueCost = -logNominator + logZ; + + real diff = fabs(trueCost - cost); + diff /= fabs(cost) < fabs(trueCost) ? fabs(cost) : fabs(trueCost); + VLOG(1) << "cost=" << cost << " trueCost=" << trueCost << " diff=" << diff + << std::endl; + if (typeid(real) == typeid(double)) { // NOLINT + EXPECT_LE(diff, 1e-10); + } else { + EXPECT_LE(diff, 5e-3); + } + } + } +} + +inline real epsilon() { return typeid(real) == typeid(double) ?
1e-10 : 0.06; } + +TestConfig initTestConfig(size_t numClasses, bool withWeight) { + TestConfig config; + config.layerConfig.set_type("crf"); + config.layerConfig.set_size(numClasses); + config.biasSize = 0; + + config.inputDefs.push_back({INPUT_SEQUENCE_DATA, + "layer_0", + numClasses, + numClasses * (numClasses + 2)}); + config.layerConfig.add_inputs(); + config.inputDefs.push_back( + {INPUT_SEQUENCE_LABEL, "layer_label", numClasses, 0}); + config.layerConfig.add_inputs(); + + if (withWeight) { + config.inputDefs.push_back({INPUT_DENSE_DIM_DATA, "layer_weight", 1, 0}); + config.layerConfig.add_inputs(); + } + + return config; +} + +TEST(Layer, CRFLayer) { + size_t numClasses = 10; + for (int tries = 0; tries < 5; ++tries) { + TestConfig config = initTestConfig(numClasses, /* withWeight= */ false); + for (int length : {1, 3, 100}) { + // Not support GPU now + testLayerGrad(config, + "crf", + length, + /* trans= */ false, + /* useGpu= */ false, + /* useWeight= */ false, + epsilon()); + } + } +} + +TEST(Layer, CRFLayerUseWeight) { + size_t numClasses = 10; + for (int tries = 0; tries < 5; ++tries) { + TestConfig config = initTestConfig(numClasses, /* withWeight= */ true); + for (int length : {1, 3, 100}) { + // Not support GPU now + testLayerGrad(config, + "crf", + length, + /* trans= */ false, + /* useGpu= */ false, + /* useWeight= */ false, + epsilon()); + } + } +} + +int main(int argc, char** argv) { + initMain(argc, argv); + hl_start(); + hl_init(FLAGS_gpu_id); + FLAGS_thread_local_rand_use_global_seed = true; + srand(1); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index 207fc0566fcf4a0d2e971f3c169a14a64146155b..54b72375b743fe025e0ded5fdbce5699a0b4be1a 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -34,8 +34,7 @@ DECLARE_double(checkgrad_eps); DECLARE_bool(thread_local_rand_use_global_seed); DECLARE_bool(prev_batch_state); -// Do one forward pass of convTrans layer and check to see if its output -// matches the given result +// Do one forward pass of ConvLayer using either exconv or cudnn_conv MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, @@ -46,22 +45,35 @@ MatrixPtr doOneConvTest(size_t imgSize, size_t groups, MatrixPtr& inputData, real* param, - bool useGpu) { + bool useGpu, + bool isDeconv = false) { TestConfig config; config.biasSize = numfilters; + string layerType; if (useGpu) { - config.layerConfig.set_type("cudnn_conv"); + layerType = (isDeconv) ? "cudnn_convt" : "cudnn_conv"; } else { - config.layerConfig.set_type("exconv"); + layerType = (isDeconv) ? 
"exconvt" : "exconv"; } + config.layerConfig.set_type(layerType); config.layerConfig.set_num_filters(numfilters); config.layerConfig.set_partial_sum(1); config.layerConfig.set_shared_biases(true); size_t weightSize = channel * filter_size * filter_size * config.layerConfig.num_filters() / groups; - config.inputDefs.push_back( - {INPUT_DATA, "layer_0", imgSize * imgSize * channel, weightSize}); + if (isDeconv) { + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", output_x * output_x * channel, weightSize}); + config.layerConfig.set_size(imgSize * imgSize * + config.layerConfig.num_filters()); + } else { + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", imgSize * imgSize * channel, weightSize}); + config.layerConfig.set_size(output_x * output_x * + config.layerConfig.num_filters()); + } + LayerInputConfig* input = config.layerConfig.add_inputs(); ConvConfig* conv = input->mutable_conv_conf(); conv->set_filter_size(filter_size); @@ -72,12 +84,15 @@ MatrixPtr doOneConvTest(size_t imgSize, conv->set_stride(stride); conv->set_stride_y(stride); conv->set_groups(groups); - conv->set_filter_channels(channel / groups); conv->set_img_size(imgSize); conv->set_output_x(output_x); - config.layerConfig.set_size(conv->output_x() * conv->output_x() * - config.layerConfig.num_filters()); + if (isDeconv) { + conv->set_filter_channels(numfilters / groups); + } else { + conv->set_filter_channels(channel / groups); + } + config.layerConfig.set_name("conv"); std::vector dataLayers; @@ -105,6 +120,8 @@ MatrixPtr doOneConvTest(size_t imgSize, TEST(Layer, convParaUnified) { #ifndef PADDLE_ONLY_CPU MatrixPtr input, resultCpu, resultGpu; + + /// TEST1 for conv /// input = Matrix::create(1, 4 * 4, false, false); real inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; real param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1}; @@ -121,7 +138,7 @@ TEST(Layer, convParaUnified) { /*groups*/ 1, input, param, - false); + /*useGpu*/ false); resultGpu = doOneConvTest(/* imgSize */ 4, /* output_x */ 2, @@ -133,9 +150,42 @@ TEST(Layer, convParaUnified) { /*groups*/ 1, input, param, - true); + /*useGpu*/ true); checkMatrixEqual(resultCpu, resultGpu); + /// TEST1 for deconv /// + input = Matrix::create(1, 2 * 2, false, false); + real inputDataT[] = {1, 2, 3, 4}; + input->setData(inputDataT); + + resultCpu = doOneConvTest(/* imgSize */ 4, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 3, + /*channel*/ 1, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param, + /*useGpu*/ false, + /*isDeconv*/ true); + + resultGpu = doOneConvTest(/* imgSize */ 4, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 3, + /*channel*/ 1, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param, + /*useGpu*/ true, + /*isDeconv*/ true); + checkMatrixEqual(resultCpu, resultGpu); + + /// TEST2 for conv /// input = Matrix::create(1, 3 * 3 * 2, false, false); real inputData2[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}; @@ -153,7 +203,7 @@ TEST(Layer, convParaUnified) { /*groups*/ 1, input, param2, - false); + /*useGpu*/ false); resultGpu = doOneConvTest(/* imgSize */ 3, /* output_x */ 2, @@ -165,9 +215,10 @@ TEST(Layer, convParaUnified) { /*groups*/ 1, input, param2, - true); + /*useGpu*/ true); checkMatrixEqual(resultCpu, resultGpu); + /// TEST3 for conv /// real param3[] = {1, 2, 3, 4, 4, 3, 2, 1}; resultCpu = doOneConvTest(/* imgSize */ 3, @@ -180,7 +231,66 @@ TEST(Layer, convParaUnified) { /*groups*/ 2, input, param3, - false); + /*useGpu*/ 
false); + + resultGpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 2, + input, + param3, + /*useGpu*/ true); + checkMatrixEqual(resultCpu, resultGpu); + + /// TEST2 for deconv /// + input = Matrix::create(1, 2 * 2 * 2, false, false); + real inputData2T[] = {1, 2, 3, 4, 5, 6, 7, 8}; + input->setData(inputData2T); + + resultCpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param2, + /*useGpu*/ false, + /*isDeconv*/ true); + + resultGpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param2, + /*useGpu*/ true, + /*isDeconv*/ true); + checkMatrixEqual(resultCpu, resultGpu); + + /// TEST3 for deconv /// + resultCpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 2, + input, + param3, + /*useGpu*/ false, + /*isDeconv*/ true); resultGpu = doOneConvTest(/* imgSize */ 3, /* output_x */ 2, @@ -192,7 +302,8 @@ TEST(Layer, convParaUnified) { /*groups*/ 2, input, param3, - true); + /*useGpu*/ true, + /*isDeconv*/ true); checkMatrixEqual(resultCpu, resultGpu); #endif } diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 14d9db52470b2828186eca04d303135910489266..0d7bd8c3b8522f897a4a9481a82f2d19e6a0f872 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -166,15 +166,19 @@ TEST(Projection, scaling) { } } -void testProjectionConv(size_t groups) { +void testProjectionConv(size_t groups, bool isDeconv) { const int NUM_FILTERS = 18; const int FILTER_SIZE = 2; - const int FILTER_SIZE_Y = 3; + const int FILTER_SIZE_Y = 4; const int CHANNELS = 3; const int IMAGE_SIZE = 16; ProjectionConfig conf; - conf.set_type("conv"); + if (isDeconv) { + conf.set_type("convt"); + } else { + conf.set_type("conv"); + } conf.set_num_filters(NUM_FILTERS); ConvConfig* conv = conf.mutable_conv_conf(); @@ -186,7 +190,11 @@ void testProjectionConv(size_t groups) { conv->set_stride(2); conv->set_stride_y(2); conv->set_groups(groups); - conv->set_filter_channels(conv->channels() / conv->groups()); + if (isDeconv) { + conv->set_filter_channels(NUM_FILTERS / conv->groups()); + } else { + conv->set_filter_channels(conv->channels() / conv->groups()); + } conv->set_img_size(IMAGE_SIZE); int output_x = outputSize(conv->img_size(), conv->filter_size(), @@ -199,8 +207,14 @@ void testProjectionConv(size_t groups) { conv->stride_y(), /* caffeMode */ true); conv->set_output_x(output_x); - conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS); - conf.set_output_size(output_x * output_y * NUM_FILTERS); + conv->set_output_y(output_y); + if (isDeconv) { + conf.set_input_size(output_x * output_y * CHANNELS); + conf.set_output_size(IMAGE_SIZE * IMAGE_SIZE * NUM_FILTERS); + } else { + conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS); + conf.set_output_size(output_x * output_y * NUM_FILTERS); + } testProjectionGrad(conf, INPUT_DATA, @@ -215,8 +229,12 @@ void testProjectionConv(size_t groups) { #ifndef PADDLE_ONLY_CPU TEST(Projection, conv) { - testProjectionConv(1); - testProjectionConv(3); + /// test ConvProjection + testProjectionConv(1, false); + 
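testProjectionConv derives input_size and output_size from outputSize(...) with caffeMode = true. For reference, the usual Caffe-style relation is sketched below; this illustrates the formula these tests rely on, not Paddle's actual outputSize() declaration:

// Convolution output size (sketch; the non-caffe branch is assumed to
// round up so every input pixel is covered by some window).
int convOutputSize(int imageSize, int filterSize, int padding, int stride,
                   bool caffeMode) {
  if (caffeMode) {
    // floor division: only windows fully inside the padded image count
    return (imageSize - filterSize + 2 * padding) / stride + 1;
  }
  return (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1;
}

With IMAGE_SIZE = 16, FILTER_SIZE = 2, padding = 0 and stride = 2 this gives output_x = 8. A transposed ("convt") projection swaps the roles of the two sizes, which is why the isDeconv branch exchanges input_size and output_size and computes filter_channels from NUM_FILTERS rather than from CHANNELS.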
testProjectionConv(3, false); + /// test ConvTransProjection + testProjectionConv(1, true); + testProjectionConv(3, true); } #endif @@ -276,27 +294,6 @@ TEST(Layer, AddtoLayer) { } } -TEST(Layer, CRFLayer) { - TestConfig config; - config.layerConfig.set_type("crf"); - config.layerConfig.set_size(10); - config.biasSize = 0; - - config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 120}); - config.inputDefs.push_back({INPUT_SEQUENCE_LABEL, "layer_1", 10, 0}); - config.layerConfig.add_inputs(); - config.layerConfig.add_inputs(); - - // Not support GPU now - testLayerGrad(config, - "crf", - 100, - /* trans */ false, - /* useGpu */ false, - false /*useWeight*/, - 0.03 /*epsilon*/); -} - TEST(Layer, CTCLayer) { TestConfig config; config.layerConfig.set_type("ctc"); @@ -406,11 +403,11 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) { config.layerConfig.set_partial_sum(1); config.layerConfig.set_shared_biases(true); - config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288}); + config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384}); LayerInputConfig* input = config.layerConfig.add_inputs(); ConvConfig* conv = input->mutable_conv_conf(); conv->set_filter_size(2); - conv->set_filter_size_y(3); + conv->set_filter_size_y(4); conv->set_channels(16); conv->set_padding(0); conv->set_padding_y(1); @@ -437,6 +434,9 @@ TEST(Layer, convTransLayer) { for (auto useGpu : {false, true}) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu); } +#ifndef PADDLE_ONLY_CPU + testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true); +#endif } TEST(Layer, blockExpandLayer) { @@ -804,10 +804,14 @@ TEST(Layer, ExpandLayer) { testExpandLayer("seq", true); // seq expand to hasSubseq } -void testDegradeLayer(bool hasSubseq, string layer_type, string trans_type) { +void testDegradeLayer(bool hasSubseq, + string layer_type, + string trans_type, + int stride) { TestConfig config; config.layerConfig.set_type(layer_type); config.layerConfig.set_size(10); + config.layerConfig.set_seq_pool_stride(stride); config.biasSize = 0; config.inputDefs.push_back( @@ -827,36 +831,46 @@ void testDegradeLayer(bool hasSubseq, string layer_type, string trans_type) { if (layer_type == "average") { for (auto strategy : {"average", "sum", "squarerootn"}) { LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type - << " average_strategy=" << strategy; + << " average_strategy=" << strategy + << " seq_pool_stride=" << stride; config.layerConfig.set_average_strategy(strategy); testDegradeLayerGrad(config, layer_type); } } else { - LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type; + LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type + << " seq_pool_stride=" << stride; testDegradeLayerGrad(config, layer_type); } } TEST(Layer, MaxLayer) { - testDegradeLayer(false, "max", "non-seq"); // seq max to non-seq - testDegradeLayer(true, "max", "non-seq"); // hasSubseq max to non-seq - testDegradeLayer(true, "max", "seq"); // hasSubseq max to seq + testDegradeLayer(false, "max", "non-seq", -1); // seq max to non-seq + testDegradeLayer(true, "max", "non-seq", -1); // hasSubseq max to non-seq + testDegradeLayer(true, "max", "seq", -1); // hasSubseq max to seq } TEST(Layer, SequenceLastInstanceLayer) { testDegradeLayer(false, "seqlastins", - "non-seq"); // seq seqlastins to non-seq + "non-seq", + -1); // seq seqlastins to non-seq + testDegradeLayer(false, + "seqlastins", + "non-seq", + 5); // seq seqlastins to a shorten seq, 
stride window = 5 testDegradeLayer(true, "seqlastins", - "non-seq"); // hasSubseq seqlastins to non-seq - testDegradeLayer(true, "seqlastins", "seq"); // hasSubseq seqlastins to seq + "non-seq", + -1); // hasSubseq seqlastins to non-seq + testDegradeLayer( + true, "seqlastins", "seq", -1); // hasSubseq seqlastins to seq } TEST(Layer, AverageLayer) { - testDegradeLayer(false, "average", "non-seq"); // seq average to non-seq - testDegradeLayer(true, "average", "non-seq"); // hasSubseq average to non-seq - testDegradeLayer(true, "average", "seq"); // hasSubseq average to seq + testDegradeLayer(false, "average", "non-seq", -1); // seq average to non-seq + testDegradeLayer( + true, "average", "non-seq", -1); // hasSubseq average to non-seq + testDegradeLayer(true, "average", "seq", -1); // hasSubseq average to seq } TEST(Layer, SequenceConcatLayer) { @@ -1503,16 +1517,20 @@ TEST(Layer, BatchNormalizationLayer) { #endif } -TEST(Operator, conv) { +void testConvOperator(bool isDeconv) { TestConfig config; const int NUM_FILTERS = 16; const int FILTER_SIZE = 2; const int FILTER_SIZE_Y = 3; const int CHANNELS = 3; const int IMAGE_SIZE = 16; - const int IMAGE_SIZE_Y = 8; + const int IMAGE_SIZE_Y = 9; OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs(); - operatorConf.set_type("conv"); + if (isDeconv) { + operatorConf.set_type("convt"); + } else { + operatorConf.set_type("conv"); + } ConvConfig* conv = operatorConf.mutable_conv_conf(); operatorConf.set_num_filters(NUM_FILTERS); conv->set_filter_size(FILTER_SIZE); @@ -1523,7 +1541,6 @@ TEST(Operator, conv) { conv->set_stride(2); conv->set_stride_y(2); conv->set_groups(1); - conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_img_size(IMAGE_SIZE); conv->set_img_size_y(IMAGE_SIZE_Y); conv->set_output_x(outputSize(conv->img_size(), @@ -1536,11 +1553,22 @@ TEST(Operator, conv) { conv->padding_y(), conv->stride_y(), /* caffeMode */ true)); - config.layerConfig.set_size(conv->output_x() * conv->output_y() * - NUM_FILTERS); - config.inputDefs.push_back( - {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0}); + if (isDeconv) { + conv->set_filter_channels(NUM_FILTERS / conv->groups()); + config.inputDefs.push_back({INPUT_DATA, + "layer_0", + conv->output_x() * conv->output_y() * CHANNELS, + 0}); + config.layerConfig.set_size(IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS); + } else { + conv->set_filter_channels(conv->channels() / conv->groups()); + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0}); + config.layerConfig.set_size(conv->output_x() * conv->output_y() * + NUM_FILTERS); + } + config.inputDefs.push_back( {INPUT_DATA, "layer_1", @@ -1552,6 +1580,11 @@ TEST(Operator, conv) { testOperatorGrad(config, operatorConf, 100, /*useGpu*/ true, false); } +TEST(Operator, conv) { + testConvOperator(/*isDeconv*/ true); + testConvOperator(/*isDeconv*/ false); +} + TEST(Layer, FeatureMapExpandLayer) { TestConfig config; config.layerConfig.set_type("featmap_expand"); @@ -1623,6 +1656,55 @@ TEST(Layer, PadLayer) { } } +TEST(Layer, CrossChannelNormLayer) { + TestConfig config; + config.layerConfig.set_type("norm"); + config.layerConfig.set_size(100); + LayerInputConfig* input = config.layerConfig.add_inputs(); + NormConfig* norm = input->mutable_norm_conf(); + norm->set_norm_type("cross-channel-norm"); + norm->set_channels(10); + norm->set_size(100); + norm->set_scale(0); + norm->set_pow(0); + norm->set_blocked(0); + config.inputDefs.push_back({INPUT_DATA, "layer_0", 100, 10}); + + 
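The new CrossChannelNormLayer test just below exercises the "cross-channel-norm" type. Assuming it matches the usual cross-channel (L2) normalization with a learned per-channel scale, the operation being tested looks like this in scalar form; the mulColVector/divColVector helpers added to BaseMatrix later in this diff are natural building blocks for this kind of per-position scaling (a sketch, not the layer's implementation):

#include <cmath>

// out[c][p] = scale[c] * in[c][p] / ||in[:, p]||_2 for each spatial position p
void crossChannelNorm(const float* in, const float* scale, float* out,
                      int channels, int positions) {
  for (int p = 0; p < positions; ++p) {
    float norm = 1e-6f;  // small epsilon against division by zero (assumed)
    for (int c = 0; c < channels; ++c) {
      float v = in[c * positions + p];
      norm += v * v;
    }
    norm = std::sqrt(norm);
    for (int c = 0; c < channels; ++c)
      out[c * positions + p] = scale[c] * in[c * positions + p] / norm;
  }
}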
for (auto useGpu : {false, true}) { + testLayerGrad(config, "cross-channel-norm", 10, false, useGpu, false, 5); + } +} + +TEST(Layer, smooth_l1) { + TestConfig config; + config.layerConfig.set_type("smooth_l1"); + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0}); + config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 1, 0}); + config.layerConfig.add_inputs(); + config.layerConfig.add_inputs(); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "smooth_l1", 100, false, useGpu, false, 2.0); + } +} + +TEST(Layer, TransLayer) { + TestConfig config; + const int height = 128; + const int width = 1028; + config.layerConfig.set_type("trans"); + config.layerConfig.set_size(width); + + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", /* dim= */ height * width, /* paraSize= */ 0}); + config.layerConfig.add_inputs(); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "trans", height, /* trans= */ false, useGpu); + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/gserver/tests/test_LinearChainCRF.cpp b/paddle/gserver/tests/test_LinearChainCRF.cpp index f046cb0b289c9ce22b98f3200bf0a3f7d48d77f5..b37277054c58a5f71cc4649fc6c062ca8dc1d4c9 100644 --- a/paddle/gserver/tests/test_LinearChainCRF.cpp +++ b/paddle/gserver/tests/test_LinearChainCRF.cpp @@ -36,7 +36,7 @@ TEST(LinearChainCRF, decoding) { real* a = para.getData(); real* b = para.getData() + numClasses; real* w = para.getData() + 2 * numClasses; - LinearChainCRF crf(4, para.getData(), nullptr); + LinearChainCRF crf(4, para.getData()); for (int length : {1, 2, 3, 10}) { for (int tries = 0; tries < 10; ++tries) { CpuMatrix x(length, numClasses); diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu index 0a0d92d1ae65f5b6020eb71fe2a6db5a3c625d9c..de48b6fac9c7d8125a552022c52353ef6bcef995 100644 --- a/paddle/math/BaseMatrix.cu +++ b/paddle/math/BaseMatrix.cu @@ -1453,6 +1453,24 @@ void BaseMatrixT::divRowVector(BaseMatrixT& b) { true_type() /* bAsRowVector */, false_type()); } +template +void BaseMatrixT::mulColVector(BaseMatrixT& b) { + MatrixOffset offset(0, 0, 0, 0); + int numRows = height_; + int numCols = width_; + applyBinary(binary::DotMul(), b, numRows, numCols, offset, + false_type(), true_type() /* bAsColVector */); +} + +template +void BaseMatrixT::divColVector(BaseMatrixT& b) { + MatrixOffset offset(0, 0, 0, 0); + int numRows = height_; + int numCols = width_; + applyBinary(binary::DotDiv(), b, numRows, numCols, offset, + false_type(), true_type() /* bAsColVector */); +} + template<> template int BaseMatrixT::applyRow(Agg agg, BaseMatrixT& b) { diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h index 8691c87ac3b88499a9676d59af533e0f4713dfc3..6ed48c8d88ee698689de6f7a7f470b97a094ea5b 100644 --- a/paddle/math/BaseMatrix.h +++ b/paddle/math/BaseMatrix.h @@ -545,6 +545,9 @@ public: void mulRowVector(BaseMatrixT& b); void divRowVector(BaseMatrixT& b); + void mulColVector(BaseMatrixT& b); + void divColVector(BaseMatrixT& b); + void addP2P(BaseMatrixT& b); /** diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index d7aa1184872d5a6129becca1f6e282776c9dbe15..6203cd3b9ab9f95853cd3c46750fd55d6dfbba4a 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -85,11 +85,16 @@ int getrf(const CBLAS_ORDER order, float* A, const int lda, int* ipiv) { +#ifdef PADDLE_USE_LAPACK #ifdef PADDLE_USE_ATLAS return clapack_sgetrf(order, M, N, A, lda, ipiv); #else return 
LAPACKE_sgetrf(order, M, N, A, lda, ipiv); #endif +#else + LOG(FATAL) << "Not implemented"; +#endif + return 0; } template <> @@ -99,11 +104,16 @@ int getrf(const CBLAS_ORDER order, double* A, const int lda, int* ipiv) { +#ifdef PADDLE_USE_LAPACK #ifdef PADDLE_USE_ATLAS return clapack_dgetrf(order, M, N, A, lda, ipiv); #else return LAPACKE_dgetrf(order, M, N, A, lda, ipiv); #endif +#else + LOG(FATAL) << "Not implemented"; +#endif + return 0; } template <> @@ -112,11 +122,16 @@ int getri(const CBLAS_ORDER order, float* A, const int lda, const int* ipiv) { +#ifdef PADDLE_USE_LAPACK #ifdef PADDLE_USE_ATLAS return clapack_sgetri(order, N, A, lda, ipiv); #else return LAPACKE_sgetri(order, N, A, lda, ipiv); #endif +#else + LOG(FATAL) << "Not implemented"; +#endif + return 0; } template <> @@ -125,11 +140,16 @@ int getri(const CBLAS_ORDER order, double* A, const int lda, const int* ipiv) { +#ifdef PADDLE_USE_LAPACK #ifdef PADDLE_USE_ATLAS return clapack_dgetri(order, N, A, lda, ipiv); #else return LAPACKE_dgetri(order, N, A, lda, ipiv); #endif +#else + LOG(FATAL) << "Not implemented"; +#endif + return 0; } template <> diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h index c8559eefd8378450fc18c2ba821c65b39c8cc046..9f8f84a87c5e60b2a6573844f251c42152d8156b 100644 --- a/paddle/math/MathFunctions.h +++ b/paddle/math/MathFunctions.h @@ -17,11 +17,14 @@ limitations under the License. */ #ifdef PADDLE_USE_MKL #include +#ifdef PADDLE_USE_LAPACK #include +#endif #else extern "C" { #include } +#ifdef PADDLE_USE_LAPACK #ifdef PADDLE_USE_ATLAS extern "C" { #include @@ -30,6 +33,7 @@ extern "C" { #include #endif #endif +#endif #include diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 07450bfb0ef709840f7e8253e87c227276529a2a..55a7344495f8e57dc95095ab1b81b45008fa9acc 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -483,6 +483,20 @@ void GpuMatrix::sequenceAvgForward(Matrix& a, hl_sequence_avg_forward(dst, src, starts, height, width, mode); } +void GpuMatrix::sequenceAvgBackward(Matrix& a, + const IVector& startsPos, + int mode) { + size_t height = a.getHeight(); + size_t width = getWidth(); + CHECK_EQ(height, startsPos.getSize() - 1); + CHECK_EQ(width, a.getWidth()); + real* dst = getData(); + real* src = a.getData(); + const int* starts = startsPos.getData(); + + hl_sequence_avg_backward(dst, src, starts, height, width, mode); +} + /* this = scaleAB*(a*b) + scaleT*this */ void GpuMatrix::mul(const GpuMatrix& a, const GpuMatrix& b, @@ -2304,6 +2318,41 @@ void CpuMatrix::sequenceAvgForward(Matrix& a, } } +void CpuMatrix::sequenceAvgBackward(Matrix& a, + const IVector& startsPos, + int mode) { + size_t height = a.getHeight(); + size_t width = getWidth(); + CHECK_EQ(height, startsPos.getSize() - 1); + CHECK_EQ(width, a.getWidth()); + real* dst = getData(); + real* src = a.getData(); + const int* starts = startsPos.getData(); + MatrixPtr outMtx = Matrix::create(nullptr, 1, width, false, false); + MatrixPtr dataMtx = Matrix::create(nullptr, 1, width, false, false); + for (size_t i = 0; i < height; ++i) { + int sequenceLength = starts[i + 1] - starts[i]; + if (0 == sequenceLength) { + // empty sequence + continue; + } + outMtx->setData(dst + starts[i] * width, sequenceLength, width); + dataMtx->setData(src + i * width); + if (mode == 0) { + // plain average + outMtx->addBias(*dataMtx, 1.0f / sequenceLength); + } else if (mode == 1) { + // sum instead of average + outMtx->addBias(*dataMtx, 1.0f); + } else if (mode == 2) { + // divide by square root of 
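The new sequenceAvgBackward mirrors sequenceAvgForward: for each sequence, the single pooled gradient row is scattered back to all of that sequence's timesteps, scaled according to mode (0 = average, 1 = sum, 2 = divide by sqrt of the length). A scalar sketch of the scale factor, matching the CPU implementation shown here:

#include <cmath>

// gradient scale applied to every timestep of a sequence of length seqLen
float seqAvgBackwardScale(int mode, int seqLen) {
  if (mode == 0) return 1.0f / seqLen;             // plain average
  if (mode == 1) return 1.0f;                      // sum
  return 1.0f / std::sqrt((float)seqLen);          // mode 2: squarerootn
}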
sequenceLength + outMtx->addBias(*dataMtx, 1.0f / std::sqrt(sequenceLength)); + } else { + LOG(FATAL) << "should not reach here"; + } + } +} + /* this = scaleAB*(a*b) + scaleT*this*/ void CpuMatrix::mul(const Matrix& a, const Matrix& b, @@ -2377,41 +2426,8 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { int lda = a->getStride(); int ldb = b->getStride(); int ldc = getStride(); -#ifndef PADDLE_TYPE_DOUBLE - cblas_sgemm(CblasRowMajor, - a_trans, - b_trans, - M, - N, - K, - scaleAB, - A, - lda, - B, - ldb, - scaleT, - C, - ldc); -#else - cblas_dgemm(CblasRowMajor, - a_trans, - b_trans, - M, - N, - K, - scaleAB, - A, - lda, - B, - ldb, - scaleT, - C, - ldc); -// TODO(yuyang18): Is gemm defined other place? -#endif - - VLOG(2) << " A[0]=" << A[0] << " A[1]=" << A[1] << " B[0]=" << B[0] - << " B[1]=" << B[1] << " C[0]=" << C[0] << " C[1]=" << C[1]; + gemm( + a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc); } void CpuMatrix::mul( @@ -3590,6 +3606,55 @@ void CpuMatrix::sumOfSquaresBp(Matrix& output, Matrix& label) { } } +void CpuMatrix::smoothL1(Matrix& output, Matrix& label) { + CHECK(output.useGpu_ == false && label.useGpu_ == false) + << "Matrix type are not equal"; + + size_t numSamples = getHeight(); + size_t dim = output.getWidth(); + CHECK_EQ(label.getHeight(), numSamples); + CHECK_EQ(output.getHeight(), numSamples); + CHECK_EQ(label.getWidth(), dim); + CHECK_EQ(getWidth(), (size_t)1); + real* out = output.getData(); + real* cost = getData(); + real* lbl = label.getData(); + + for (size_t i = 0; i < numSamples; ++i, out += dim, cost += dim, lbl += dim) { + for (size_t j = 0; j < dim; ++j) { + cost[j] = std::fabs(out[j] - lbl[j]); + if (cost[j] < 1.0) + cost[j] = 0.5 * cost[j] * cost[j]; + else + cost[j] = cost[j] - 0.5; + } + } +} + +void CpuMatrix::smoothL1Bp(Matrix& output, Matrix& label) { + CHECK(output.useGpu_ == false && label.useGpu_ == false) + << "Matrix type are not equal"; + + size_t numSamples = getHeight(); + size_t dim = output.getWidth(); + CHECK_EQ(label.getHeight(), numSamples); + CHECK_EQ(output.getHeight(), numSamples); + CHECK_EQ(label.getWidth(), dim); + CHECK_EQ(getWidth(), (size_t)1); + real* out = output.getData(); + real* cost = getData(); + real* lbl = label.getData(); + + // f'(x) = x if |x| < 1 + // = sign(x) otherwise + for (size_t i = 0; i < numSamples; ++i, out += dim, cost += dim, lbl += dim) { + for (size_t j = 0; j < dim; ++j) { + cost[j] = out[j] - lbl[j]; + if (std::fabs(cost[j]) >= 1) cost[j] = (0 < cost[j]) - (cost[j] < 0); + } + } +} + void CpuMatrix::tanh(Matrix& output) { CHECK(isContiguous()); CHECK(output.isContiguous()); diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index d0ba2e93feabfcc11ac1d261bc40c9c6973a8c29..3252adb19e4c2e48f86c3c811bfc7d75fd06a8f7 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -461,6 +461,12 @@ public: LOG(FATAL) << "Not implemented"; } + virtual void sequenceAvgBackward(Matrix& a, + const IVector& startsPos, + int mode) { + LOG(FATAL) << "Not implemented"; + } + /** * @code * this = scaleAB*(a*b) + scaleT*this @@ -783,6 +789,14 @@ public: LOG(FATAL) << "Not implemented"; } + virtual void smoothL1(Matrix& output, Matrix& label) { + LOG(FATAL) << "Not implemented"; + } + + virtual void smoothL1Bp(Matrix& outputV, Matrix& label) { + LOG(FATAL) << "Not implemented"; + } + virtual void tanh(Matrix& output) { LOG(FATAL) << "Not implemented"; } virtual void tanhDerivative(Matrix& output) { @@ -1195,6 +1209,7 @@ public: void collectSharedBias(Matrix& a, 
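The smoothL1/smoothL1Bp pair added above implements the Huber-style smooth-L1 cost elementwise. In scalar form (a sketch for reference, with x = output - label):

#include <cmath>

float smoothL1(float x) {
  float a = std::fabs(x);
  return a < 1.0f ? 0.5f * a * a : a - 0.5f;  // quadratic near 0, linear tail
}

// derivative: x inside the quadratic region, sign(x) outside
float smoothL1Grad(float x) {
  return std::fabs(x) < 1.0f ? x : (x > 0.0f ? 1.0f : -1.0f);
}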
real scale); void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode); + void sequenceAvgBackward(Matrix& a, const IVector& startsPos, int mode); /** * @code @@ -1611,6 +1626,7 @@ public: void collectSharedBias(Matrix& a, real scale); void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode); + void sequenceAvgBackward(Matrix& a, const IVector& startsPos, int mode); /** * @code @@ -1720,6 +1736,9 @@ public: /// gradient of sumOfSquares. void sumOfSquaresBp(Matrix& outputV, Matrix& label); + void smoothL1(Matrix& output, Matrix& label); + void smoothL1Bp(Matrix& output, Matrix& label); + void tanh(Matrix& output); void tanhDerivative(Matrix& output); diff --git a/paddle/math/SIMDFunctions.cpp b/paddle/math/SIMDFunctions.cpp index 95219debf50e57407b668d315b91141d259fc779..d66d543a61450b47b7758b50eaecc107c6fe3576 100644 --- a/paddle/math/SIMDFunctions.cpp +++ b/paddle/math/SIMDFunctions.cpp @@ -13,119 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SIMDFunctions.h" +#ifdef __SSE3__ #include +#endif #include -#ifndef __AVX__ -static void addto_sse(float* a, const float* b, size_t len) { - int offset = len % 16; - __m128 ma0, ma1, ma2, ma3; - __m128 mb0, mb1, mb2, mb3; - - for (unsigned int k = 0; k < len / 16; k++, a += 16, b += 16) { - ma0 = _mm_load_ps(a); - ma1 = _mm_load_ps(a + 4); - ma2 = _mm_load_ps(a + 8); - ma3 = _mm_load_ps(a + 12); - - mb0 = _mm_load_ps(b); - mb1 = _mm_load_ps(b + 4); - mb2 = _mm_load_ps(b + 8); - mb3 = _mm_load_ps(b + 12); - - ma0 = _mm_add_ps(ma0, mb0); - ma1 = _mm_add_ps(ma1, mb1); - ma2 = _mm_add_ps(ma2, mb2); - ma3 = _mm_add_ps(ma3, mb3); - - _mm_store_ps(a, ma0); - _mm_store_ps(a + 4, ma1); - _mm_store_ps(a + 8, ma2); - _mm_store_ps(a + 12, ma3); - } - - for (int i = 0; i < offset; i++) a[i] += b[i]; -} - -static void batch_addto_sse(float* a, const float* b[], int batch, size_t len) { - int offset = len % 16; - - __m128 ma0, ma1, ma2, ma3; - __m128 mb0, mb1, mb2, mb3; - - for (unsigned int k = 0; k < len / 16; k++, a += 16) { - ma0 = _mm_load_ps(a); - ma1 = _mm_load_ps(a + 4); - ma2 = _mm_load_ps(a + 8); - ma3 = _mm_load_ps(a + 12); - - for (int i = 0; i < batch; i++) { - mb0 = _mm_load_ps(b[i]); - mb1 = _mm_load_ps(b[i] + 4); - mb2 = _mm_load_ps(b[i] + 8); - mb3 = _mm_load_ps(b[i] + 12); - ma0 = _mm_add_ps(ma0, mb0); - ma1 = _mm_add_ps(ma1, mb1); - ma2 = _mm_add_ps(ma2, mb2); - ma3 = _mm_add_ps(ma3, mb3); - b[i] += 16; - } - - _mm_store_ps(a, ma0); - _mm_store_ps(a + 4, ma1); - _mm_store_ps(a + 8, ma2); - _mm_store_ps(a + 12, ma3); - } - - for (int i = 0; i < offset; i++) { - for (int k = 0; k < batch; k++) a[i] += b[k][i]; - } - return; -} - -static void col_max_sse(float* result, - const float* data, - int dim, - int numSamples) { - // first sample, direct copy - for (int d = 0; d < dim; ++d) { - result[d] = data[d]; - } - int offset = dim % 16; - __m128 ma0, ma1, ma2, ma3; - __m128 mb0, mb1, mb2, mb3; - // first 16n dims - for (int k = 0; k < dim / 16; k++, result += 16, data += 16) { - ma0 = _mm_load_ps(result); - ma1 = _mm_load_ps(result + 4); - ma2 = _mm_load_ps(result + 8); - ma3 = _mm_load_ps(result + 12); - for (int i = 1; i < numSamples; i++) { - mb0 = _mm_load_ps(data + i * dim); - mb1 = _mm_load_ps(data + i * dim + 4); - mb2 = _mm_load_ps(data + i * dim + 8); - mb3 = _mm_load_ps(data + i * dim + 12); - ma0 = _mm_max_ps(ma0, mb0); - ma1 = _mm_max_ps(ma1, mb1); - ma2 = _mm_max_ps(ma2, mb2); - ma3 = _mm_max_ps(ma3, mb3); - } - _mm_store_ps(result, 
ma0); - _mm_store_ps(result + 4, ma1); - _mm_store_ps(result + 8, ma2); - _mm_store_ps(result + 12, ma3); - } - // last dims - for (int d = 0; d < offset; ++d) { - float sm = data[d]; - for (int i = 1; i < numSamples; ++i) { - sm = std::max(sm, data[i * dim + d]); - } - result[d] = sm; - } -} - -#else +#ifdef __AVX__ static void addto_avx(float* a, const float* b, size_t len) { int offset = len % 32; @@ -355,17 +248,128 @@ static void decayL1_avx( } } +#elif defined(__SSE3__) + +static void addto_sse(float* a, const float* b, size_t len) { + int offset = len % 16; + __m128 ma0, ma1, ma2, ma3; + __m128 mb0, mb1, mb2, mb3; + + for (unsigned int k = 0; k < len / 16; k++, a += 16, b += 16) { + ma0 = _mm_load_ps(a); + ma1 = _mm_load_ps(a + 4); + ma2 = _mm_load_ps(a + 8); + ma3 = _mm_load_ps(a + 12); + + mb0 = _mm_load_ps(b); + mb1 = _mm_load_ps(b + 4); + mb2 = _mm_load_ps(b + 8); + mb3 = _mm_load_ps(b + 12); + + ma0 = _mm_add_ps(ma0, mb0); + ma1 = _mm_add_ps(ma1, mb1); + ma2 = _mm_add_ps(ma2, mb2); + ma3 = _mm_add_ps(ma3, mb3); + + _mm_store_ps(a, ma0); + _mm_store_ps(a + 4, ma1); + _mm_store_ps(a + 8, ma2); + _mm_store_ps(a + 12, ma3); + } + + for (int i = 0; i < offset; i++) a[i] += b[i]; +} + +static void batch_addto_sse(float* a, const float* b[], int batch, size_t len) { + int offset = len % 16; + + __m128 ma0, ma1, ma2, ma3; + __m128 mb0, mb1, mb2, mb3; + + for (unsigned int k = 0; k < len / 16; k++, a += 16) { + ma0 = _mm_load_ps(a); + ma1 = _mm_load_ps(a + 4); + ma2 = _mm_load_ps(a + 8); + ma3 = _mm_load_ps(a + 12); + + for (int i = 0; i < batch; i++) { + mb0 = _mm_load_ps(b[i]); + mb1 = _mm_load_ps(b[i] + 4); + mb2 = _mm_load_ps(b[i] + 8); + mb3 = _mm_load_ps(b[i] + 12); + ma0 = _mm_add_ps(ma0, mb0); + ma1 = _mm_add_ps(ma1, mb1); + ma2 = _mm_add_ps(ma2, mb2); + ma3 = _mm_add_ps(ma3, mb3); + b[i] += 16; + } + + _mm_store_ps(a, ma0); + _mm_store_ps(a + 4, ma1); + _mm_store_ps(a + 8, ma2); + _mm_store_ps(a + 12, ma3); + } + + for (int i = 0; i < offset; i++) { + for (int k = 0; k < batch; k++) a[i] += b[k][i]; + } + return; +} + +static void col_max_sse(float* result, + const float* data, + int dim, + int numSamples) { + // first sample, direct copy + for (int d = 0; d < dim; ++d) { + result[d] = data[d]; + } + int offset = dim % 16; + __m128 ma0, ma1, ma2, ma3; + __m128 mb0, mb1, mb2, mb3; + // first 16n dims + for (int k = 0; k < dim / 16; k++, result += 16, data += 16) { + ma0 = _mm_load_ps(result); + ma1 = _mm_load_ps(result + 4); + ma2 = _mm_load_ps(result + 8); + ma3 = _mm_load_ps(result + 12); + for (int i = 1; i < numSamples; i++) { + mb0 = _mm_load_ps(data + i * dim); + mb1 = _mm_load_ps(data + i * dim + 4); + mb2 = _mm_load_ps(data + i * dim + 8); + mb3 = _mm_load_ps(data + i * dim + 12); + ma0 = _mm_max_ps(ma0, mb0); + ma1 = _mm_max_ps(ma1, mb1); + ma2 = _mm_max_ps(ma2, mb2); + ma3 = _mm_max_ps(ma3, mb3); + } + _mm_store_ps(result, ma0); + _mm_store_ps(result + 4, ma1); + _mm_store_ps(result + 8, ma2); + _mm_store_ps(result + 12, ma3); + } + // last dims + for (int d = 0; d < offset; ++d) { + float sm = data[d]; + for (int i = 1; i < numSamples; ++i) { + sm = std::max(sm, data[i * dim + d]); + } + result[d] = sm; + } +} + #endif -#ifndef __AVX__ -#define SIMD_INVOKE(func, ...) func##_sse(__VA_ARGS__) -#else +#if defined(__AVX__) #define SIMD_INVOKE(func, ...) func##_avx(__VA_ARGS__) +#elif defined(__SSE3__) +#define SIMD_INVOKE(func, ...) 
func##_sse(__VA_ARGS__) #endif namespace paddle { namespace simd { namespace internal { +#ifdef __SSE3__ void addToImpl(float* a, const float* b, size_t len) { SIMD_INVOKE(addto, a, b, len); } @@ -376,6 +380,7 @@ void batchAddToImpl(float* a, const float* b[], int batch, size_t len) { void colMaxImpl(float* result, const float* data, int dim, int numSamples) { SIMD_INVOKE(col_max, result, data, dim, numSamples); } +#endif #ifdef __AVX__ void decayL1AvxImpl(float* dst, float* src, float lambda, size_t len) { @@ -385,8 +390,8 @@ void decayL1AvxImpl( float* dst, float* src, float* lr, float lambda, size_t len) { decayL1_avx(dst, src, lr, lambda, len); } - #endif + } // namespace internal } // namespace simd } // namespace paddle diff --git a/paddle/math/SIMDFunctions.h b/paddle/math/SIMDFunctions.h index 9b0a8719b287a2b88e966484090974586d64521f..439f11b79d134d7054f45f2d0a70fc5a6fde6c13 100644 --- a/paddle/math/SIMDFunctions.h +++ b/paddle/math/SIMDFunctions.h @@ -128,17 +128,29 @@ void decayL1AvxImpl( template <> inline void addTo(float* a, const float* b, size_t len) { +#ifdef __SSE3__ internal::addToImpl(a, b, len); +#else + naive::addTo(a, b, len); +#endif } template <> inline void batchAddTo(float* a, const float* b[], int batch, size_t len) { +#ifdef __SSE3__ internal::batchAddToImpl(a, b, batch, len); +#else + naive::batchAddTo(a, b, batch, len); +#endif } template <> inline void colMax(float* result, const float* data, int dim, int numSamples) { +#ifdef __SSE3__ internal::colMaxImpl(result, data, dim, numSamples); +#else + naive::colMax(result, data, dim, numSamples); +#endif } template <> diff --git a/paddle/math/Storage.cpp b/paddle/math/Storage.cpp index 56e5442394b04230c22d668aa734dc0fa44004c2..7ce17a3207becb176a852a16fca52376009db9ee 100644 --- a/paddle/math/Storage.cpp +++ b/paddle/math/Storage.cpp @@ -14,6 +14,7 @@ limitations under the License. */ #include "Storage.h" #include "Allocator.h" +#include "paddle/utils/StringUtil.h" #include "paddle/utils/Util.h" DEFINE_int32(pool_limit_size, @@ -62,7 +63,7 @@ PoolAllocator* StorageEngine::getGpuAllocator(int deviceId) { } if (gpuAllocator_[deviceId] == nullptr) { std::string name = - "gpu" + std::to_string(deviceId) + std::string("_pool"); + "gpu" + str::to_string(deviceId) + std::string("_pool"); gpuAllocator_[deviceId] = new PoolAllocator(new GpuAllocator(), FLAGS_pool_limit_size, name); } diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp index 21918b86e1ad98766ceaf09dea3020d6e8592191..22ce39701fca7b650fc03794cb0701e0987d2dae 100644 --- a/paddle/math/tests/test_BaseMatrix.cpp +++ b/paddle/math/tests/test_BaseMatrix.cpp @@ -110,6 +110,8 @@ TEST(BaseMatrix, BaseMatrix) { compare(&BaseMatrix::addRowVector); compare(&BaseMatrix::mulRowVector); compare(&BaseMatrix::divRowVector); + compare(&BaseMatrix::mulColVector); + compare(&BaseMatrix::divColVector); compare(&BaseMatrix::addP2P); compare(&BaseMatrix::invSqrt); } diff --git a/paddle/math/tests/test_RowBuffer.cpp b/paddle/math/tests/test_RowBuffer.cpp index 5f66f22ef73dcff1868c1a3e03139a680b1ce2b5..8cc4c69a1a4d8afec08bf7fb13408e135a06c09c 100644 --- a/paddle/math/tests/test_RowBuffer.cpp +++ b/paddle/math/tests/test_RowBuffer.cpp @@ -17,10 +17,10 @@ limitations under the License. 
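After this restructuring the SIMD helpers dispatch at compile time: AVX when available, SSE3 as the fallback, and the scalar naive:: versions when neither instruction set is present. A condensed standalone sketch of the same pattern (only the SSE3 branch is spelled out; names are illustrative, not Paddle's):

#include <cstddef>
#if defined(__SSE3__)
#include <pmmintrin.h>
#endif

// portable fallback, equivalent to naive::addTo
static void addToNaive(float* a, const float* b, size_t len) {
  for (size_t i = 0; i < len; ++i) a[i] += b[i];
}

#if defined(__SSE3__)
// 4 floats per step; assumes 16-byte aligned pointers, like the code above
static void addToSse(float* a, const float* b, size_t len) {
  size_t k = 0;
  for (; k + 4 <= len; k += 4)
    _mm_store_ps(a + k, _mm_add_ps(_mm_load_ps(a + k), _mm_load_ps(b + k)));
  for (; k < len; ++k) a[k] += b[k];
}
#endif

void addTo(float* a, const float* b, size_t len) {
#if defined(__SSE3__)
  addToSse(a, b, len);   // an __AVX__ branch would be preferred first
#else
  addToNaive(a, b, len);
#endif
}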
*/ TEST(RowBuffer, testAutoGrow) { paddle::RowBuffer buf(128); - ASSERT_EQ(128, buf.getWidth()); + ASSERT_EQ(128UL, buf.getWidth()); ASSERT_TRUE(buf.isAutoGrowth()); buf.resize(2); - ASSERT_EQ(2, buf.getRowCount()); + ASSERT_EQ(2UL, buf.getRowCount()); for (size_t i = 0; i < buf.getWidth() * 2; ++i) { buf.data()[i] = i; } @@ -35,7 +35,7 @@ TEST(RowBuffer, testAutoGrow) { data[i] = i; } - ASSERT_EQ(3, buf.getRowCount()); + ASSERT_EQ(3UL, buf.getRowCount()); for (size_t i = 0; i < buf.getRowCount() - 1; ++i) { for (size_t j = 0; j < buf.getWidth(); ++j) { ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5); @@ -51,7 +51,7 @@ TEST(RowBuffer, testWithMemBuf) { std::make_shared(128 * 2 * sizeof(real)); paddle::RowBuffer buf(mem, 128); ASSERT_TRUE(!buf.isAutoGrowth()); - ASSERT_EQ(2, buf.getRowCount()); + ASSERT_EQ(2UL, buf.getRowCount()); for (size_t i = 0; i < buf.getWidth() * 2; ++i) { buf.data()[i] = i; } diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 08b64c1bb6f5d359a2d2164e723a76c5360168ee..dd19fe516fbf724a86479e6f27032614ab4c6106 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -685,7 +685,7 @@ TEST(SMatrix, topK) { } } -void testMatrixSequenceAvgForward(int batchSize, int inputDim, int mode) { +void testMatrixSequenceAvg(int batchSize, int inputDim, int mode) { MatrixPtr cpuInput = std::make_shared(batchSize, inputDim); MatrixPtr gpuInput = std::make_shared(batchSize, inputDim); cpuInput->randomizeUniform(); @@ -706,15 +706,25 @@ void testMatrixSequenceAvgForward(int batchSize, int inputDim, int mode) { gpuOutput->sequenceAvgForward(*gpuInput, *gpuSequence, mode); TensorCheckErr(*cpuOutput, *gpuOutput); + + MatrixPtr cpuInGrad = std::make_shared(batchSize, inputDim); + MatrixPtr gpuInGrad = std::make_shared(batchSize, inputDim); + cpuInGrad->randomizeUniform(); + gpuInGrad->copyFrom(*cpuInGrad); + + cpuInGrad->sequenceAvgBackward(*cpuOutput, *cpuSequence, mode); + gpuInGrad->sequenceAvgBackward(*gpuOutput, *gpuSequence, mode); + + TensorCheckErr(*cpuInGrad, *gpuInGrad); } -TEST(Matrix, sequenceAvgForward) { +TEST(Matrix, sequenceAvg) { for (auto batchSize : {10, 128, 6000}) { for (auto inputDim : {32, 100, 512}) { for (auto mode : {0, 1, 2}) { VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim << " mode=" << mode; - testMatrixSequenceAvgForward(batchSize, inputDim, mode); + testMatrixSequenceAvg(batchSize, inputDim, mode); } } } diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index 7a343cca33f5b420be6192231ac73ca1c2da5fb9..645bf737990638df042723ed827d0823cb201e72 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -123,46 +123,6 @@ static void resizeAndCopy(ICpuGpuVectorPtr& dest, } } -static void resizeAndCopy(UserDefinedVectorPtr& dest, - const UserDefinedVectorPtr& src, - bool useGpu, - hl_stream_t stream) { - if (src) { - CHECK(!useGpu) << "not implemented"; - size_t height = src->size(); - if (!dest) { - dest = std::make_shared>(height); - } else { - dest->resize(height); - } - std::copy_n(src->begin(), height, dest->begin()); - } else { - dest.reset(); - } -} - -static void resizeAndCopy(UserDefinedVectorPtr& dest, - const UserDefinedVectorPtr& src, - int32_t startPos, - int32_t copySize, - bool useGpu, - hl_stream_t stream = HPPL_STREAM_DEFAULT) { - if (src) { - CHECK(!useGpu) << "not implemented"; - CHECK_LE((size_t)startPos + copySize, src->size()); - - size_t height = copySize; - if (!dest) { 
-      dest = std::make_shared<std::vector<void*>>(height);
-    } else {
-      dest->resize(height);
-    }
-    std::copy_n(src->begin() + startPos, height, dest->begin());
-  } else {
-    dest.reset();
-  }
-}
-
 static void resizeAndCopy(SVectorPtr& dest,
                           const SVectorPtr& src,
                           bool useGpu,
@@ -223,7 +183,6 @@ void Argument::resizeAndCopyFrom(const Argument& src,
                   false /* useGpu */,
                   stream);
   }
-  resizeAndCopy(udp, src.udp, useGpu, stream);
   resizeAndCopy(strs, src.strs, useGpu, stream);
   frameWidth = src.frameWidth;
   frameHeight = src.frameHeight;
@@ -255,7 +214,6 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src,
     resizeAndCopy(value, src.value, startRow, copySize, useGpu, stream);
     resizeAndCopy(grad, src.grad, startRow, copySize, useGpu, stream);
     resizeAndCopy(ids, src.ids, startRow, copySize, useGpu, stream);
-    resizeAndCopy(udp, src.udp, startRow, copySize, useGpu, stream);
    resizeAndCopy(strs, src.strs, startRow, copySize, useGpu, stream);
    return copySize;
  } else {
@@ -268,7 +226,6 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src,
    resizeAndCopy(value, src.value, startRow, copyFeatureSize, useGpu, stream);
    resizeAndCopy(grad, src.grad, startRow, copyFeatureSize, useGpu, stream);
    resizeAndCopy(ids, src.ids, startRow, copyFeatureSize, useGpu, stream);
-    resizeAndCopy(udp, src.udp, startRow, copySize, useGpu, stream);
    resizeAndCopy(sequenceStartPositions,
                  src.sequenceStartPositions,
                  startSeq,
@@ -583,7 +540,7 @@ void Argument::checkSubset() const {
  }
}

-void Argument::degradeSequence(const Argument& input, bool useGpu) {
+void Argument::degradeSequence(const Argument& input) {
  CHECK_EQ(input.hasSubseq(), 1UL);
  size_t numSequences = input.getNumSequences();
  size_t numSubSequences = input.getNumSubSequences();
@@ -602,6 +559,49 @@ void Argument::degradeSequence(const Argument& input,
  tgtBuf[numSequences] = numSubSequences;
}

+void Argument::poolSequenceWithStride(const Argument& input,
+                                      size_t stride,
+                                      IVectorPtr* stridePositions,
+                                      bool reversed) {
+  // If input.sequenceStartPositions = [0, 9, 14, 17, 30] and stride = 5,
+  // then sequenceStartPositions = [0, 2, 3, 4, 7].
+  // If reversed = false, stridePositions = [0, 5, 9, 14, 17, 22, 27, 30];
+  // if reversed = true, stridePositions = [0, 4, 9, 14, 17, 20, 25, 30].
+
+  CHECK(input.sequenceStartPositions);
+  CHECK_EQ(input.hasSubseq(), 0UL);
+  CHECK_GT(stride, 0) << "stride must be larger than 0";
+  size_t numSequences = input.getNumSequences();
+  ICpuGpuVector::resizeOrCreate(
+      sequenceStartPositions, numSequences + 1, false);
+  const int* starts = input.sequenceStartPositions->getData(false);
+  int* tgtBuf = sequenceStartPositions->getMutableData(false);
+  // the first index of the target sequence and of the stride positions is 0
+  tgtBuf[0] = 0;
+  std::vector<int> stridePos;
+  for (size_t seqId = 0; seqId < numSequences; ++seqId) {
+    size_t seqLength = starts[seqId + 1] - starts[seqId];
+    stridePos.emplace_back(starts[seqId]);
+    if (seqLength == 0) {
+      // empty sequence
+      tgtBuf[seqId + 1] = tgtBuf[seqId];
+    } else {
+      int size = ceil((float)seqLength / stride);
+      tgtBuf[seqId + 1] = tgtBuf[seqId] + size;
+      for (int i = 0; i < size - 1; ++i) {
+        int cur = reversed ? starts[seqId + 1] - (size - 1 - i) * stride
+                           : stridePos.back() + stride;
+        stridePos.emplace_back(cur);
+      }
+    }
+  }
+  stridePos.emplace_back(starts[numSequences]);
+  int size = stridePos.size();
+  CHECK_EQ(size - 1, tgtBuf[numSequences]);
+  IVector::resizeOrCreate(*stridePositions, size, false);
+  (*stridePositions)->copyFrom(stridePos.data(), size);
+}
+
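To make the stride-pooling arithmetic above concrete: each sequence of length L is cut into ceil(L / stride) windows, and `reversed` anchors the windows at the end of the sequence instead of the beginning. A standalone sketch of the same start-position computation (plain std::vector, mirroring the loop just added):

#include <cmath>
#include <cstdio>
#include <vector>

std::vector<int> strideStarts(const std::vector<int>& starts, int stride,
                              bool reversed) {
  std::vector<int> pos;
  for (size_t s = 0; s + 1 < starts.size(); ++s) {
    int len = starts[s + 1] - starts[s];
    int n = static_cast<int>(std::ceil(static_cast<float>(len) / stride));
    pos.push_back(starts[s]);  // every sequence keeps its original start
    for (int i = 1; i < n; ++i)
      pos.push_back(reversed ? starts[s + 1] - (n - i) * stride
                             : starts[s] + i * stride);
  }
  pos.push_back(starts.back());
  return pos;
}

int main() {
  // [0, 9, 14, 17, 30], stride 5  ->  0 5 9 14 17 22 27 30
  // same input, reversed = true   ->  0 4 9 14 17 20 25 30
  for (int v : strideStarts({0, 9, 14, 17, 30}, 5, false)) std::printf("%d ", v);
  std::printf("\n");
  return 0;
}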
 void Argument::getValueString(
    std::unordered_map<std::string, std::string>* out) const {
  if (value) {
diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h
index 178c068b93ac5fc1e06200984f14da86069cf7e4..91aca98e186aef0ad6b345cf4791ef80c616e3fe 100644
--- a/paddle/parameter/Argument.h
+++ b/paddle/parameter/Argument.h
@@ -24,8 +24,6 @@ limitations under the License. */
 namespace paddle {

-// vector of user defined pointers
-typedef std::shared_ptr<std::vector<void*>> UserDefinedVectorPtr;
 typedef std::shared_ptr<std::vector<std::string>> SVectorPtr;

 struct Argument {
@@ -40,7 +38,6 @@
        sequenceStartPositions(nullptr),
        subSequenceStartPositions(nullptr),
        cpuSequenceDims(nullptr),
-        udp(nullptr),
        deviceId(-1),
        allCount(0),
        valueCount(0),
@@ -63,7 +60,6 @@
    sequenceStartPositions = argument.sequenceStartPositions;
    subSequenceStartPositions = argument.subSequenceStartPositions;
    cpuSequenceDims = argument.cpuSequenceDims;
-    udp = argument.udp;
    deviceId = argument.deviceId;
    allCount = argument.allCount;
    frameHeight = argument.frameHeight;
@@ -96,8 +92,6 @@
  // dimension of sequence, stored only in CPU
  IVectorPtr cpuSequenceDims;

-  UserDefinedVectorPtr udp;  // user defined pointer
-
  int deviceId;            // the GPU device id which the argument in
  int allCount;            // the number of output layers using this argument
  mutable int valueCount;  // waiting this member when layer do forward
@@ -137,7 +131,6 @@
    if (ids) return ids->getSize();
    if (grad) return grad->getHeight();
    if (in) return in->getHeight();
-    if (udp) return udp->size();
    if (strs) return strs->size();
    return 0;
  }
@@ -163,7 +156,7 @@
        : sequenceStartPositions->getData(false);
  }

-  static inline real sumCosts(const std::vector<Argument>& arguments) {
+  static inline real sum(const std::vector<Argument>& arguments) {
    real cost = 0;
    for (auto& arg : arguments) {
      if (arg.value) {
@@ -296,8 +289,17 @@
  /*
    sequence has sub-sequence degrades to a sequence.
  */
-  void degradeSequence(const Argument& input, bool useGpu);
+  void degradeSequence(const Argument& input);

+  /*
+    After pooling with stride n (n smaller than the sequence length),
+    a long sequence will be shortened.
+    This function is invalid for sequences that contain sub-sequences.
+  */
+  void poolSequenceWithStride(const Argument& input,
+                              size_t stride,
+                              IVectorPtr* stridePositions,
+                              bool reversed = false);
  /**
   * @brief getValueString will return the argument's output in string. There
   * are several kinds of output.
The keys of output dictionary are 'value', diff --git a/paddle/parameter/tests/CMakeLists.txt b/paddle/parameter/tests/CMakeLists.txt index cab264db8e5000e8eb61830ec07e9f590c103119..181ccdc1f099e8d61a44c1741116abe7afe0f11d 100644 --- a/paddle/parameter/tests/CMakeLists.txt +++ b/paddle/parameter/tests/CMakeLists.txt @@ -1 +1,2 @@ add_simple_unittest(test_common) +add_simple_unittest(test_argument) diff --git a/paddle/parameter/tests/test_argument.cpp b/paddle/parameter/tests/test_argument.cpp new file mode 100644 index 0000000000000000000000000000000000000000..81fe4ee397351a013c8616ad08fb8cb4b8dae4d0 --- /dev/null +++ b/paddle/parameter/tests/test_argument.cpp @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include + +using namespace paddle; // NOLINT + +TEST(Argument, poolSequenceWithStride) { + Argument input, output; + ICpuGpuVector::resizeOrCreate(input.sequenceStartPositions, 5, false); + int* inStart = input.sequenceStartPositions->getMutableData(false); + inStart[0] = 0; + inStart[1] = 9; + inStart[2] = 14; + inStart[3] = 17; + inStart[4] = 30; + + int strideResult[] = {0, 5, 9, 14, 17, 22, 27, 30}; + int strideResultReversed[] = {0, 4, 9, 14, 17, 20, 25, 30}; + + for (auto reversed : {false, true}) { + IVectorPtr stridePositions; + output.poolSequenceWithStride( + input, 5 /* stride */, &stridePositions, reversed); + + const int* outStart = output.sequenceStartPositions->getData(false); + CHECK_EQ(outStart[0], 0); + CHECK_EQ(outStart[1], 2); + CHECK_EQ(outStart[2], 3); + CHECK_EQ(outStart[3], 4); + CHECK_EQ(outStart[4], 7); + + CHECK_EQ(stridePositions->getSize(), 8); + auto result = reversed ? strideResultReversed : strideResult; + for (int i = 0; i < 8; i++) { + CHECK_EQ(stridePositions->getData()[i], result[i]); + } + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + initMain(argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h index 11d7a147bf749ba2de0772b5efd5f73ab0ccdb1a..667bc451d16aa1436ac5d74dd96edbd70556edd0 100644 --- a/paddle/pserver/BaseClient.h +++ b/paddle/pserver/BaseClient.h @@ -30,9 +30,6 @@ namespace paddle { * the first solution arms with sendThreads_/recvThreads_ and sendJobQueue_/ * recvJobQueue_. the second solution use some shared thread pool to manage * connections. - * In addition to pserver, metric learning also uses network to exchange - * features within multi-machines, so this class just abstracts some basic - * threads and queue buffer creation for them */ class BaseClient { protected: diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp index 856fa0ad1ab30e3fc554ac96dd3bed71b1548579..19ff40ba7e9584f772043f939bcb31caf666163d 100644 --- a/paddle/pserver/ParameterServer2.cpp +++ b/paddle/pserver/ParameterServer2.cpp @@ -29,6 +29,7 @@ limitations under the License. 
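Both Storage.cpp (earlier in this diff) and ParameterServer2.cpp (next) switch from std::to_string to str::to_string. Presumably this is a portability shim for toolchains whose C++ standard library lacks std::to_string (some Android NDK STLs, for instance); a minimal version of such a helper would look like the sketch below, though this is an assumption about StringUtil.h, not its actual contents:

#include <sstream>
#include <string>

namespace str {
// stream-based fallback for std::to_string
template <class T>
std::string to_string(const T& v) {
  std::ostringstream os;
  os << v;
  return os.str();
}
}  // namespace str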
*/ #include "paddle/utils/Flags.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Stat.h" +#include "paddle/utils/StringUtil.h" DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec"); DEFINE_double(async_lagged_ratio_min, @@ -218,7 +219,8 @@ void ParameterServer2::setConfig(const SetConfigRequest& request, callback(response); /// always defined, barrier slowest node function need it. - statSet_.reset(new StatSet("ParameterServer" + std::to_string(serverId_))); + statSet_.reset(new StatSet("ParameterServer" + + str::to_string(static_cast(serverId_)))); } real bufferSum(const std::vector& buffers) { @@ -367,11 +369,8 @@ void ParameterServer2::addGradient(const SendParameterRequest& request, std::vector* outputBuffers) { VLOG(1) << "pserver: addGradient"; -/// forwardbackward delta from all trainers -/// indicate the fluctuation caused by forwardbackward. -#ifndef PADDLE_METRIC_LEARNING - // @TODO(yanfei): - // add support tuning forwardbackward balance for metric learning + // forwardbackward delta from all trainers + // indicate the fluctuation caused by forwardbackward. if (!numPassFinishClients_) { REGISTER_BARRIER_DELTA_SERVER_SET( *statSet_, @@ -381,7 +380,6 @@ void ParameterServer2::addGradient(const SendParameterRequest& request, request.forwardbackward_time(), isSparseServer_ ? "_sparseUpdater" : "_denseUpdater"); } -#endif { /// approximately pure network overhead diff --git a/paddle/pserver/test/CMakeLists.txt b/paddle/pserver/test/CMakeLists.txt index 64654f67d0c2c82f05a5038fb33b220f3cff0f39..6e8f9c37f64b70921e09241089a5a480fd8ca47f 100644 --- a/paddle/pserver/test/CMakeLists.txt +++ b/paddle/pserver/test/CMakeLists.txt @@ -10,9 +10,11 @@ add_test(NAME socket_test add_unittest_without_exec(test_ProtoServer test_ProtoServer.cpp) -add_test(NAME test_ProtoServer - COMMAND ${PROJ_ROOT}/paddle/.set_port.sh -p port - ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoServer) +IF(NOT ON_TRAVIS) + add_test(NAME test_ProtoServer + COMMAND ${PROJ_ROOT}/paddle/.set_port.sh -p port + ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoServer) +ENDIF(NOT ON_TRAVIS) # TODO(yuyang18): Run test_ProtoServer when with rdma # add_test(NAME test_ProtoServerRDMA diff --git a/paddle/py_paddle/.gitignore b/paddle/py_paddle/.gitignore index 9e8ad4bf1638a69ab7ef19badfbf867e116548d2..80d1f76fbc05627e21e334af55d63a4a534434c6 100644 --- a/paddle/py_paddle/.gitignore +++ b/paddle/py_paddle/.gitignore @@ -1 +1,2 @@ swig_paddle.py +_swig_paddle.so diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index 21d1cb75f4d40e6ed011b33c6366c9d31c0fcc7c..7c6b83541002071d6e9d00c17be97b6ce4bf8528 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -16,20 +16,74 @@ import paddle.trainer.PyDataProvider2 as dp2 import collections import swig_paddle import numpy +import itertools __all__ = ['DataProviderConverter'] class IScanner(object): + """ + The scanner will scan Python object two passes, then convert it to Paddle's + argument. + + In the first pass, `pre_scan` will be invoked by every data instance, and + then invoke `finish_pre_scan` to arguments. And the second pass do the same + thing except the functions changed to `scan`, `finish_scan`. + + During the first pass, a scanner may count the shape of input matrix and + allocate memory for this argument. Then fill the data into this argument + in second pass. 
+ """ + def __init__(self, input_type, pos): self.input_type = input_type - assert isinstance(self.input_type, dp2.InputType) + if not isinstance(self.input_type, dp2.InputType): + raise ValueError("input type should be dataprovider2.InputType") self.pos = pos + # data_in_gpu is used to indicate whether to create argument on GPU + # or not in GPU mode. Now if using one thread (trainer_count=1), + # trainer uses NeuralNetwork which needs to create argument on GPU + # before calling forward function. So, set data_in_gpu to True. + # Otherwise, trainer uses MultiGradientMachine which will transfer + # data from CPU to GPU in the forward function, set data_in_gpu to + # False in this case. + self.data_in_gpu = swig_paddle.isUsingGpu( + ) and swig_paddle.getTrainerCount() == 1 + + def pre_scan(self, dat): + """ + First pass scan method. During this method, the scanner could count the + data number, and get the total memory size this batch would use. + + :param dat: The python object. + """ + pass + + def finish_pre_scan(self, argument): + """ + Finish first scan pass. Allocate the memory. + + :param argument: Output arguments object. + :type argument: swig_paddle.Arguments + :return: + """ + pass def scan(self, dat): + """ + Second pass scan method. Copy the data to arguments. + + :param dat: The python object. + """ pass def finish_scan(self, argument): + """ + Finish second pass. Finalize the resources, etc. + + :param argument: Output arguments object. + :type argument: swig_paddle.Arguments + """ pass @@ -41,19 +95,26 @@ class DenseScanner(IScanner): def __init__(self, input_type, pos): IScanner.__init__(self, input_type, pos) self.__mat__ = None + self.__height__ = 0 + + def pre_scan(self, dat): + self.__height__ += 1 + + def finish_pre_scan(self, argument): + self.__mat__ = numpy.ndarray( + shape=(self.__height__, self.input_type.dim), dtype=numpy.float32) + self.__height__ = 0 def scan(self, dat): - if self.__mat__ is None: - self.__mat__ = numpy.array([dat], dtype='float32') - else: - self.__mat__ = numpy.append(self.__mat__, [dat], axis=0) + self.__mat__[self.__height__] = dat + self.__height__ += 1 def finish_scan(self, argument): assert isinstance(argument, swig_paddle.Arguments) - assert isinstance(self.input_type, dp2.InputType) if self.__mat__.dtype != numpy.float32: self.__mat__ = self.__mat__.astype(numpy.float32) - m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True, False) + m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True, + self.data_in_gpu) argument.setSlotValue(self.pos, m) @@ -63,7 +124,6 @@ class SparseBinaryScanner(IScanner): self.__rows__ = [0] self.__cols__ = [] self.__height__ = 0 - self.__nnz__ = 0 self.__value__ = [] def scan(self, dat): @@ -76,11 +136,13 @@ class SparseBinaryScanner(IScanner): def finish_scan(self, argument): assert isinstance(argument, swig_paddle.Arguments) - assert isinstance(self.input_type, dp2.InputType) - m = swig_paddle.Matrix.createSparse(self.__height__, - self.input_type.dim, - len(self.__cols__), - len(self.__value__) == 0) + m = swig_paddle.Matrix.createSparse( + self.__height__, + self.input_type.dim, + len(self.__cols__), + len(self.__value__) == 0, + False, # trans + False) # TODO supoort GPU assert isinstance(m, swig_paddle.Matrix) m.sparseCopyFrom(self.__rows__, self.__cols__, self.__value__) argument.setSlotValue(self.pos, m) @@ -98,13 +160,22 @@ class SparseFloatScanner(SparseBinaryScanner): class IndexScanner(IScanner): def __init__(self, input_type, pos): IScanner.__init__(self, input_type, pos) - 
self.__ids__ = [] + self.__ids__ = None + self.__idx__ = 0 + + def pre_scan(self, dat): + self.__idx__ += 1 + + def finish_pre_scan(self, argument): + self.__ids__ = [0] * self.__idx__ + self.__idx__ = 0 def scan(self, dat): - self.__ids__.append(dat) + self.__ids__[self.__idx__] = dat + self.__idx__ += 1 def finish_scan(self, argument): - ids = swig_paddle.IVector.create(self.__ids__) + ids = swig_paddle.IVector.create(self.__ids__, self.data_in_gpu) assert isinstance(argument, swig_paddle.Arguments) argument.setSlotIds(self.pos, ids) @@ -116,6 +187,13 @@ class SequenceScanner(IScanner): self.__inner_scanner__ = inner_scanner self.__setter__ = setter + def pre_scan(self, dat): + for each in dat: + self.__inner_scanner__.pre_scan(each) + + def finish_pre_scan(self, argument): + self.__inner_scanner__.finish_pre_scan(argument) + def scan(self, dat): self.__seq__.append(self.__seq__[-1] + self.get_size(dat)) for each in dat: @@ -152,7 +230,14 @@ class DataProviderConverter(object): ] for each_sample in dat: - for each_step, scanner in zip(each_sample, scanners): + for each_step, scanner in itertools.izip(each_sample, scanners): + scanner.pre_scan(each_step) + + for scanner in scanners: + scanner.finish_pre_scan(argument) + + for each_sample in dat: + for each_step, scanner in itertools.izip(each_sample, scanners): scanner.scan(each_step) for scanner in scanners: diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py index ce105d249aaf3e838443d3e0cf5996fe8c783a22..3ae8dbf964c68c6f01ba30cb3ac69fb6c2f08c30 100644 --- a/paddle/py_paddle/util.py +++ b/paddle/py_paddle/util.py @@ -83,13 +83,17 @@ def __arguments_to_numpy__(i, arg): assert isinstance(arg, swig_paddle.Arguments) value = arg.getSlotValue(i) ids = arg.getSlotIds(i) + prob = arg.getSlotIn(i) if value is not None: assert isinstance(value, swig_paddle.Matrix) value = value.copyToNumpyMat() if ids is not None: assert isinstance(ids, swig_paddle.IVector) ids = ids.copyToNumpyArray() - return {"value": value, "id": ids} + if prob is not None: + assert isinstance(prob, swig_paddle.Matrix) + prob = prob.copyToNumpyMat() + return {"value": value, "id": ids, "prob": prob} def __monkeypatch_gradient_machine__(): @@ -195,6 +199,12 @@ def __monkeypatch_gradient_machine__(): swig_paddle.GradientMachine.getParameters = getParameters + def getNonStaticParameters(self): + return (self.getNonStaticParameter(i) + for i in xrange(self.getNonStaticParameterSize())) + + swig_paddle.GradientMachine.getNonStaticParameters = getNonStaticParameters + def getLayerOutputs(self, layerNames): """ getLayerOutputs. get outputs of layers and return a numpy matrix dict. 
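With the change above, `__arguments_to_numpy__` returns a three-entry dict. A minimal sketch of how a caller might consume it through the monkey-patched `getLayerOutputs` (see the hunk just below); `machine` and the layer name "output" are hypothetical:

```python
outs = machine.getLayerOutputs(["output"])  # machine: a swig_paddle.GradientMachine
arg = outs["output"]   # per-layer dict built by __arguments_to_numpy__
value = arg["value"]   # numpy matrix from getSlotValue, or None
ids = arg["id"]        # numpy int array from getSlotIds, or None
prob = arg["prob"]     # numpy matrix from getSlotIn, or None
```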
@@ -208,7 +218,7 @@ def __monkeypatch_gradient_machine__(): output = dict() for name in layerNames: - output[name] = __matrix_to_numpy__(self.getLayerOutput(name)) + output[name] = __arguments_to_numpy__(0, self.getLayerOutput(name)) return output swig_paddle.GradientMachine.getLayerOutputs = getLayerOutputs diff --git a/paddle/scripts/deb/build_scripts/.gitignore b/paddle/scripts/deb/build_scripts/.gitignore deleted file mode 100644 index 1521c8b7652b1eec8ed4fe50877aae880c758ee3..0000000000000000000000000000000000000000 --- a/paddle/scripts/deb/build_scripts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -dist diff --git a/paddle/scripts/deb/build_scripts/Dockerfile b/paddle/scripts/deb/build_scripts/Dockerfile deleted file mode 100644 index db365a65b7d33429dc1260b2ce69d6dc46abe487..0000000000000000000000000000000000000000 --- a/paddle/scripts/deb/build_scripts/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM paddledev/paddle:gpu-latest -MAINTAINER PaddlePaddle Dev Team -COPY build.sh /root/ -CMD cd /root/ && bash build.sh - diff --git a/paddle/scripts/deb/build_scripts/build.sh b/paddle/scripts/deb/build_scripts/build.sh deleted file mode 100755 index d13dea514841b110c304b8aa0e65ad16e42c75f3..0000000000000000000000000000000000000000 --- a/paddle/scripts/deb/build_scripts/build.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -set -e -apt-get update -apt-get install -y dh-make -cd ~ -mkdir -p ~/dist/gpu -mkdir -p ~/dist/cpu -mkdir -p ~/dist/cpu-noavx -mkdir -p ~/dist/gpu-noavx -cd paddle -mkdir build -cd build -cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON -DWITH_AVX=ON -make -j `nproc` -cpack -D CPACK_GENERATOR='DEB' .. -mv *.deb ~/dist/cpu - -rm -rf * -cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON -DWITH_AVX=ON -DCUDNN_ROOT=/usr/ -make -j `nproc` -cpack -D CPACK_GENERATOR='DEB' .. -mv *.deb ~/dist/gpu - - -rm -rf * -cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON -DWITH_AVX=OFF -make -j `nproc` -cpack -D CPACK_GENERATOR='DEB' .. -mv *.deb ~/dist/cpu-noavx - -rm -rf * -cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON -DWITH_AVX=OFF -DCUDNN_ROOT=/usr/ -make -j `nproc` -cpack -D CPACK_GENERATOR='DEB' .. -mv *.deb ~/dist/gpu-noavx diff --git a/paddle/scripts/deb/build_scripts/build_deb.sh b/paddle/scripts/deb/build_scripts/build_deb.sh deleted file mode 100755 index c38c6299f840345b7f6f6e0aad7482241d36198a..0000000000000000000000000000000000000000 --- a/paddle/scripts/deb/build_scripts/build_deb.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e -docker build -t build_paddle_deb . -rm -rf dist -mkdir -p dist -docker run -v$PWD/dist:/root/dist -v $PWD/../../../..:/root/paddle --name tmp_build_deb_container build_paddle_deb -docker rm tmp_build_deb_container -docker rmi build_paddle_deb diff --git a/paddle/scripts/deb/postinst b/paddle/scripts/deb/postinst deleted file mode 100644 index 1d2dd3171a132966832d87ae758d4e620475aed1..0000000000000000000000000000000000000000 --- a/paddle/scripts/deb/postinst +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e -echo "Post install paddle debian package." -echo "Install some python package used for paddle. You can run " -echo " pip install /usr/opt/paddle/share/wheels/*.whl to install them." 
-pip install /usr/opt/paddle/share/wheels/*.whl
-
diff --git a/paddle/scripts/docker/Dockerfile.gpu b/paddle/scripts/docker/Dockerfile.gpu
deleted file mode 100644
index 538233dd0c7bb5088ea9473eb4d1a1ab62e98cb5..0000000000000000000000000000000000000000
--- a/paddle/scripts/docker/Dockerfile.gpu
+++ /dev/null
@@ -1,59 +0,0 @@
-FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
-MAINTAINER PaddlePaddle Authors
-
-ARG DEBIAN_FRONTEND=noninteractive
-ARG UBUNTU_MIRROR
-RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
-
-RUN apt-get update && \
-    apt-get install -y git python-pip python-dev openssh-server bison && \
-    apt-get install -y wget unzip tar xz-utils bzip2 gzip coreutils && \
-    apt-get install -y curl sed grep graphviz libjpeg-dev zlib1g-dev && \
-    apt-get install -y python-numpy python-matplotlib gcc g++ gfortran && \
-    apt-get install -y automake && \
-    apt-get clean -y
-
-RUN pip install --upgrade pip && \
-    pip install -U "protobuf==3.1.0" && \
-    pip install -U wheel pillow BeautifulSoup && \
-    pip install -U docopt PyYAML sphinx && \
-    pip install -U sphinx_rtd_theme recommonmark jupyter
-
-RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
-    cd cmake-3.4.1 && ./bootstrap && make -j `nproc` && make install && \
-    cd .. && rm -rf cmake-3.4.1
-
-ARG BUILD_WOBOQ
-ARG BUILD_AND_INSTALL
-ARG WITH_AVX
-ARG WITH_DOC
-ARG WITH_STYLE_CHECK
-
-ENV BUILD_WOBOQ=${BUILD_WOBOQ:-OFF}
-ENV BUILD_AND_INSTALL=${BUILD_AND_INSTALL:-OFF}
-ENV WITH_GPU=ON
-ENV WITH_AVX=${WITH_AVX:-ON}
-ENV WITH_DOC=${WITH_DOC:-OFF}
-ENV WITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF}
-
-RUN mkdir /paddle
-COPY . /paddle/
-RUN /paddle/paddle/scripts/docker/build.sh
-VOLUME ["/usr/share/nginx/html/data", "/usr/share/nginx/html/paddle"]
-
-# Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service
-RUN mkdir /var/run/sshd
-RUN echo 'root:root' | chpasswd
-RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
-EXPOSE 22
-
-# Jupyter Notebook directory.
-RUN mkdir /notes/
-WORKDIR "/notes"
-EXPOSE 8888
-
-RUN mkdir -p /opt/bin
-COPY ./paddle/scripts/docker/entrypoint /opt/bin/
-
-CMD ["/opt/bin/entrypoint"]
diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..132f8cd8aaf544984e1867f63c172808d087c91f
--- /dev/null
+++ b/paddle/scripts/docker/README.md
@@ -0,0 +1,183 @@
+# Building PaddlePaddle
+
+## Goals
+
+We want the build procedure to generate Docker images, so that we can run PaddlePaddle applications on Kubernetes clusters.
+
+We want to build .deb packages, so that enterprise users can run PaddlePaddle applications without Docker.
+
+We want to minimize the size of the generated Docker images and .deb packages so as to reduce download time.
+
+We want to encapsulate build tools and dependencies in a *development* Docker image so as to ease tool installation for developers.
+
+Developers use various editors (emacs, vim, Eclipse, Jupyter Notebook), so the development Docker image contains only build tools, not editing tools; developers are supposed to git-clone the source code onto their development computers and map the code into the development container.
+
+We want the procedure and tools to also work for testing, continuous integration, and releasing.
+
+
+## Docker Images
+
+So we need two Docker images for each version of PaddlePaddle:
+
+1. `paddle:<version>-dev`
+
+   This is a development image that contains only the development tools and standardizes the building procedure. Users include:
+
+   - developers -- they no longer need to install development tools on the host and can build their current work on the host (development computer).
+   - release engineers -- they use this image to build the official release from a certain branch/tag on Github.com.
+   - document writers / Website developers -- our documents are in the source repo in the form of .md/.rst files and comments in source code, and we need tools to extract the information, typeset it, and generate Web pages.
+
+   Of course, developers can install build tools on their development computers. But different versions of PaddlePaddle might require different sets or versions of build tools. Also, it makes collaborative debugging easier if all developers use a unified development environment.
+
+   The development image should include the following tools:
+
+   - gcc/clang
+   - nvcc
+   - Python
+   - sphinx
+   - woboq
+   - sshd
+
+   Many developers work on a remote computer with a GPU; they could ssh into that computer and `docker exec` into the development container. However, running `sshd` in the container allows developers to ssh into the container directly.
+
+1. `paddle:<version>`
+
+   This is the production image, generated using the development image. This image might have multiple variants:
+
+   - GPU/AVX `paddle:<version>-gpu`
+   - GPU/no-AVX `paddle:<version>-gpu-noavx`
+   - no-GPU/AVX `paddle:<version>`
+   - no-GPU/no-AVX `paddle:<version>-noavx`
+
+   We allow users to choose between GPU and no-GPU because the GPU version image is much larger than the no-GPU version.
+
+   We allow users the choice between AVX and no-AVX because some cloud providers don't provide AVX-enabled VMs.
+
+
+## Development Environment
+
+Here we describe how to use the above two images. We start from considering our daily development environment.
+
+Developers work on a computer, which is usually a laptop or desktop:
+
+<img src="doc/paddle-development-environment.png"/>
+
+or, they might rely on a more sophisticated box (like one with GPUs):
+
+<img src="doc/paddle-development-environment-gpu.png"/>
+
+A principle here is that the source code lies on the development computer (host) so that editors like Eclipse can parse it to support auto-completion.
+
+
+## Usages
+
+### Build the Development Docker Image
+
+The following commands check out the source code on the host and build the development image `paddle:dev`:
+
+```bash
+git clone https://github.com/PaddlePaddle/Paddle paddle
+cd paddle
+docker build -t paddle:dev .
+```
+
+The `docker build` command assumes that `Dockerfile` is in the root of the source tree. Note that in this design, this `Dockerfile` is the only one in our repo.
+
+Users can specify an Ubuntu mirror server for faster downloading:
+
+```bash
+docker build -t paddle:dev --build-arg UBUNTU_MIRROR=mirror://mirrors.ubuntu.com/mirrors.txt .
+```
+
+### Build PaddlePaddle from Source Code
+
+Given the development image `paddle:dev`, the following command builds PaddlePaddle from the source tree on the development computer (host):

+```bash
+docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=OFF" -e "RUN_TEST=OFF" paddle:dev
+```
+
+This command mounts the source directory on the host into `/paddle` in the container, so the default entry point of `paddle:dev`, `build.sh`, can build the source code with possible local changes. When it writes to `/paddle/build` in the container, it actually writes to `$PWD/build` on the host.
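+
+As a quick sanity check of this bind mount (a sketch reusing the flags above; nothing here is required by `build.sh`), one can list the build directory from both sides:
+
+```bash
+# Inside the container, build artifacts land under /paddle/build ...
+docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" paddle:dev sh -c "ls /paddle/build"
+# ... and the same files are visible on the host afterwards.
+ls $PWD/build
+```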
+
+`build.sh` builds the following:
+
+- PaddlePaddle binaries,
+- `$PWD/build/paddle-<version>.deb` for production installation, and
+- `$PWD/build/Dockerfile`, which builds the production Docker image.
+
+Users can specify the following environment variables, each with the value "ON" or "OFF":
+
+- `WITH_GPU`: ***Required***. Generates NVIDIA CUDA GPU code and relies on CUDA libraries.
+- `WITH_AVX`: ***Required***. Setting it to "OFF" prevents generating AVX instructions. If you don't know what AVX is, you probably want to set it to "ON".
+- `WITH_TEST`: ***Optional, default OFF***. Build unit test binaries. Once you've built the unit tests, you can run them manually with the following command:
+  ```bash
+  docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" paddle:dev sh -c "cd /paddle/build; make coveralls"
+  ```
+- `RUN_TEST`: ***Optional, default OFF***. Run unit tests after building. You can't run unit tests without building them.
+
+### Build the Production Docker Image
+
+The following command builds the production image:
+
+```bash
+docker build -t paddle -f build/Dockerfile .
+```
+
+This production image is minimal -- it includes the binary `paddle`, the shared library `libpaddle.so`, and the Python runtime.
+
+### Run PaddlePaddle Applications
+
+Again, the development happens on the host. Suppose that we have a simple application program in `a.py`; we can test and run it using the production image:
+
+```bash
+docker run --rm -it -v $PWD:/work paddle /work/a.py
+```
+
+But this works only if all dependencies of `a.py` are in the production image. If this is not the case, we need to build a new Docker image from the production image with the missing dependencies installed.
+
+### Build and Run PaddlePaddle Applications
+
+We need a Dockerfile in https://github.com/paddlepaddle/book that builds the Docker image `paddlepaddle/book:<version>`, based on the PaddlePaddle production image:
+
+```
+FROM paddlepaddle/paddle:<version>
+RUN pip install -U matplotlib jupyter ...
+COPY . /book
+EXPOSE 8080
+CMD ["jupyter"]
+```
+
+The book image is an example of a PaddlePaddle application image. We can build it:
+
+```bash
+git clone https://github.com/paddlepaddle/book
+cd book
+docker build -t book .
+```
+
+### Build and Run Distributed Applications
+
+In our [API design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/api.md#distributed-training), we proposed an API that starts a distributed training job on a cluster. This API needs to build a PaddlePaddle application into a Docker image as above, and it calls kubectl to run the image on the cluster. The API might need to generate a Dockerfile like the one above and call `docker build`.
+
+Of course, we can manually build an application image and launch the job using the kubectl tool:
+
+```bash
+docker build -f some/Dockerfile -t myapp .
+docker tag myapp me/myapp
+docker push
+kubectl ...
+```
+
+### Reading source code with woboq codebrowser
+
+For developers who are interested in the C++ source code, please use `-e "WOBOQ=ON"` to enable building the C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser).
+
+- The following command builds PaddlePaddle, generates HTML pages from the C++ source code, and writes them into `$HOME/woboq_out` on the host:
+
+```bash
+docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" -e "WOBOQ=ON" paddle:dev
+```
+
+- You can open the generated HTML files in your Web browser.
Or, if you want to run a Nginx container to serve them for a wider audience, you can run: + +``` +docker run -v $HOME/woboq_out:/usr/share/nginx/html -d -p 8080:80 nginx +``` diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh old mode 100755 new mode 100644 index d9c44f42340323afb4930dab35114f2adc5fbb3a..e4c322bb18123f2062d221a3f8ab028f3b69a0d2 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -1,61 +1,107 @@ #!/bin/bash -function abort(){ - echo "An error occurred. Exiting..." 1>&2 - exit 1 -} - -trap 'abort' 0 set -e -# If Dockerfile.* sets BUILD_AND_INSTALL to 'ON', it would have copied -# source tree to /paddle, and this scripts should build it into -# /paddle/build. -if [[ ${BUILD_AND_INSTALL:-OFF} == 'ON' ]]; then - if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then - ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so /usr/lib/libcudnn.so - fi - - mkdir -p /paddle/build # -p means no error if exists - cd /paddle/build - cmake .. \ - -DWITH_DOC=${WITH_DOC:-OFF} \ - -DWITH_GPU=${WITH_GPU:-OFF} \ - -DWITH_AVX=${WITH_AVX:-OFF} \ - -DWITH_SWIG_PY=ON \ - -DCUDNN_ROOT=/usr/ \ - -DWITH_STYLE_CHECK=OFF \ - -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - make -j `nproc` - make install - - if [[ ${BUILD_WOBOQ:-OFF} == 'ON' ]]; then - apt-get install -y clang-3.8 llvm-3.8 libclang-3.8-dev - # Install woboq_codebrowser. - git clone https://github.com/woboq/woboq_codebrowser /woboq - cd /woboq - cmake -DLLVM_CONFIG_EXECUTABLE=/usr/bin/llvm-config-3.8 \ - -DCMAKE_BUILD_TYPE=Release \ - . - make - - export WOBOQ_OUT=/usr/share/nginx/html/paddle - export BUILD_DIR=/paddle/build - mkdir -p $WOBOQ_OUT - cp -rv /woboq/data $WOBOQ_OUT/../data - /woboq/generator/codebrowser_generator \ +# Set BASE_IMAGE according to env variables +if [ ${WITH_GPU} == "ON" ]; then + BASE_IMAGE="nvidia/cuda:8.0-cudnn5-runtime-ubuntu14.04" + # additional packages to install when building gpu images + GPU_DOCKER_PKG="python-pip python-dev" +else + BASE_IMAGE="python:2.7.13-slim" + # FIXME: python base image uses different python version than WITH_GPU + # need to change PYTHONHOME to /usr/local when using python base image + CPU_DOCKER_PYTHON_HOME_ENV="ENV PYTHONHOME /usr/local" +fi + +DOCKERFILE_GPU_ENV="" +DOCKERFILE_CUDNN_DSO="" +if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then + DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}" + DOCKERFILE_CUDNN_DSO="RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.5 /usr/lib/x86_64-linux-gnu/libcudnn.so" +fi + +mkdir -p /paddle/build +cd /paddle/build + +# build script will not fail if *.deb does not exist +rm *.deb 2>/dev/null || true + +cmake .. \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_DOC=${WITH_DOC:-OFF} \ + -DWITH_GPU=${WITH_GPU:-OFF} \ + -DWITH_AVX=${WITH_AVX:-OFF} \ + -DWITH_SWIG_PY=ON \ + -DCUDNN_ROOT=/usr/ \ + -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} \ + -DON_COVERALLS=${WITH_TEST:-OFF} \ + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON +make -j `nproc` +if [[ ${RUN_TEST:-OFF} == "ON" ]]; then + make coveralls +fi +make install + +# generate deb package for current build +# FIXME(typhoonzero): should we remove paddle/scripts/deb ? +# FIXME: CPACK_DEBIAN_PACKAGE_DEPENDS removes all dev dependencies, must +# install them in docker +cpack -D CPACK_GENERATOR='DEB' -D CPACK_DEBIAN_PACKAGE_DEPENDS="" .. + +if [[ ${WOBOQ:-OFF} == 'ON' ]]; then + apt-get install -y clang-3.8 llvm-3.8 libclang-3.8-dev + # Install woboq_codebrowser. 
+  git clone https://github.com/woboq/woboq_codebrowser /woboq
+  cd /woboq
+  cmake -DLLVM_CONFIG_EXECUTABLE=/usr/bin/llvm-config-3.8 \
+        -DCMAKE_BUILD_TYPE=Release \
+        .
+  make
+
+  export WOBOQ_OUT=/woboq_out/paddle
+  export BUILD_DIR=/paddle/build
+  mkdir -p $WOBOQ_OUT
+  cp -rv /woboq/data $WOBOQ_OUT/../data
+  /woboq/generator/codebrowser_generator \
     -b /paddle/build \
     -a \
     -o $WOBOQ_OUT \
     -p paddle:/paddle
-    /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT
-    cd /woboq
-    make clean
-  fi
+  /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT
+  cd /woboq
+  make clean
+fi

-  pip install /usr/local/opt/paddle/share/wheels/py_paddle*linux*.whl
-  pip install /usr/local/opt/paddle/share/wheels/paddle*.whl
-  paddle version
+paddle version
+
+if [[ -n ${APT_MIRROR} ]]; then
+  MIRROR_UPDATE="sed -i '${APT_MIRROR}' /etc/apt/sources.list && \\"
+else
+  MIRROR_UPDATE="\\"
 fi

-trap : 0
+cat > /paddle/build/Dockerfile <<EOF
+FROM ${BASE_IMAGE}
+MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
+ENV HOME /root
+ENV LANG en_US.UTF-8
+# Fix locales to en_US.UTF-8
+RUN ${MIRROR_UPDATE}
+    apt-get update && \
+    apt-get install -y libgfortran3 libpython2.7 ${GPU_DOCKER_PKG} && \
+    apt-get clean -y && \
+    pip install --upgrade pip && \
+    pip install -U 'protobuf==3.1.0' requests numpy
+# Use different deb file when building different type of images
+ADD build/*.deb /usr/local/opt/paddle/deb/
+# run paddle version to install python packages first
+RUN dpkg -i /usr/local/opt/paddle/deb/*.deb && \
+    rm -f /usr/local/opt/paddle/deb/*.deb && \
+    paddle version
+${CPU_DOCKER_PYTHON_HOME_ENV}
+${DOCKERFILE_CUDNN_DSO}
+${DOCKERFILE_GPU_ENV}
+# default command shows the paddle version and exit
+CMD ["paddle", "version"]
+EOF
diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle b/paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..4629f9b9da7ababdafa0b964db18a98a819c6a9e
Binary files /dev/null and b/paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle differ
diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.png b/paddle/scripts/docker/doc/paddle-development-environment-gpu.png
new file mode 100644
index 0000000000000000000000000000000000000000..61a96d7198d013f08f0f9c269cc352da5f7dd2e9
Binary files /dev/null and b/paddle/scripts/docker/doc/paddle-development-environment-gpu.png differ
diff --git a/paddle/scripts/docker/doc/paddle-development-environment.graffle b/paddle/scripts/docker/doc/paddle-development-environment.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..5b164c4832809de94ead7309af49c579135d7f48
Binary files /dev/null and b/paddle/scripts/docker/doc/paddle-development-environment.graffle differ
diff --git a/paddle/scripts/docker/doc/paddle-development-environment.png b/paddle/scripts/docker/doc/paddle-development-environment.png
new file mode 100644
index 0000000000000000000000000000000000000000..707ed45a335a981c23b3533984045f53848b55e2
Binary files /dev/null and b/paddle/scripts/docker/doc/paddle-development-environment.png differ
diff --git a/paddle/scripts/docker/entrypoint b/paddle/scripts/docker/entrypoint
index 87083467f50acd689ce57b86951f5f7a03c6a58b..bc194bd909aa308fd5fe920c9319f62a0ec2dac7 100755
--- a/paddle/scripts/docker/entrypoint
+++ b/paddle/scripts/docker/entrypoint
@@ -1,8 +1,4 @@
 #!/bin/bash
-LOG=/var/log/all
-touch $LOG
-
-/usr/sbin/sshd -D >> $LOG &
-jupyter notebook --ip=0.0.0.0 /notes/ >> $LOG &
-tail -f $LOG
+/usr/sbin/sshd -D &
+jupyter notebook --ip=0.0.0.0
/paddle/book/ diff --git a/paddle/scripts/docker/root/.bashrc b/paddle/scripts/docker/root/.bashrc new file mode 100755 index 0000000000000000000000000000000000000000..4b3024e4e81a0fa206a796c12a8b9d72f1a8f5d9 --- /dev/null +++ b/paddle/scripts/docker/root/.bashrc @@ -0,0 +1,46 @@ +# Locales + +export LC_ALL=en_US.UTF-8 +export LANG=en_US.UTF-8 +export LANGUAGE=en_US.UTF-8 + +# Aliases + +alias rm='rm -i' +alias cp='cp -i' +alias mv='mv -i' + +alias ls='ls -hFG' +alias l='ls -lF' +alias ll='ls -alF' +alias lt='ls -ltrF' +alias ll='ls -alF' +alias lls='ls -alSrF' +alias llt='ls -altrF' + +# Colorize directory listing + +alias ls="ls -ph --color=auto" + +# Colorize grep + +if echo hello|grep --color=auto l >/dev/null 2>&1; then + export GREP_OPTIONS="--color=auto" GREP_COLOR="1;31" +fi + +# Shell + +export CLICOLOR="1" + +YELLOW="\[\033[1;33m\]" +NO_COLOUR="\[\033[0m\]" +GREEN="\[\033[1;32m\]" +WHITE="\[\033[1;37m\]" + +source ~/.scripts/git-prompt.sh + +export PS1="\[\033[1;33m\]λ $WHITE\h $GREEN\w$YELLOW\$(__git_ps1 \" \[\033[35m\]{\[\033[36m\]%s\[\033[35m\]}\")$NO_COLOUR " + +# Git + +source ~/.scripts/git-completion.sh diff --git a/paddle/scripts/docker/root/.gitconfig b/paddle/scripts/docker/root/.gitconfig new file mode 100755 index 0000000000000000000000000000000000000000..6c249803a50403b9b79e36a13abe7fe88a35729d --- /dev/null +++ b/paddle/scripts/docker/root/.gitconfig @@ -0,0 +1,43 @@ +[user] + name = + email = + +[alias] + st = status --branch --short + ci = commit + br = branch + co = checkout + df = diff + l = log --pretty=format:\"%h %ad | %s%d [%an]\" --graph --date=short + ll = log --stat + +[merge] + tool = vimdiff + +[core] + excludesfile = ~/.gitignore + editor = vim + +[color] + branch = auto + diff = auto + status = auto + +[color "branch"] + current = yellow reverse + local = yellow + remote = green + +[color "diff"] + meta = yellow bold + frag = magenta bold + old = red bold + new = green bold + +[color "status"] + added = yellow + changed = green + untracked = cyan + +[push] + default = matching \ No newline at end of file diff --git a/paddle/scripts/docker/root/.scripts/git-completion.sh b/paddle/scripts/docker/root/.scripts/git-completion.sh new file mode 100755 index 0000000000000000000000000000000000000000..bdddef5ac2faf50b47dd03539dae8912bec8a16c --- /dev/null +++ b/paddle/scripts/docker/root/.scripts/git-completion.sh @@ -0,0 +1,2663 @@ +#!bash +# +# bash/zsh completion support for core Git. +# +# Copyright (C) 2006,2007 Shawn O. Pearce +# Conceptually based on gitcompletion (http://gitweb.hawaga.org.uk/). +# Distributed under the GNU General Public License, version 2.0. +# +# The contained completion routines provide support for completing: +# +# *) local and remote branch names +# *) local and remote tag names +# *) .git/remotes file names +# *) git 'subcommands' +# *) tree paths within 'ref:path/to/file' expressions +# *) file paths within current working directory and index +# *) common --long-options +# +# To use these routines: +# +# 1) Copy this file to somewhere (e.g. ~/.git-completion.sh). +# 2) Add the following line to your .bashrc/.zshrc: +# source ~/.git-completion.sh +# 3) Consider changing your PS1 to also show the current branch, +# see git-prompt.sh for details. 
+ +case "$COMP_WORDBREAKS" in +*:*) : great ;; +*) COMP_WORDBREAKS="$COMP_WORDBREAKS:" +esac + +# __gitdir accepts 0 or 1 arguments (i.e., location) +# returns location of .git repo +__gitdir () +{ + if [ -z "${1-}" ]; then + if [ -n "${__git_dir-}" ]; then + echo "$__git_dir" + elif [ -n "${GIT_DIR-}" ]; then + test -d "${GIT_DIR-}" || return 1 + echo "$GIT_DIR" + elif [ -d .git ]; then + echo .git + else + git rev-parse --git-dir 2>/dev/null + fi + elif [ -d "$1/.git" ]; then + echo "$1/.git" + else + echo "$1" + fi +} + +# The following function is based on code from: +# +# bash_completion - programmable completion functions for bash 3.2+ +# +# Copyright © 2006-2008, Ian Macdonald +# © 2009-2010, Bash Completion Maintainers +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# The latest version of this software can be obtained here: +# +# http://bash-completion.alioth.debian.org/ +# +# RELEASE: 2.x + +# This function can be used to access a tokenized list of words +# on the command line: +# +# __git_reassemble_comp_words_by_ref '=:' +# if test "${words_[cword_-1]}" = -w +# then +# ... +# fi +# +# The argument should be a collection of characters from the list of +# word completion separators (COMP_WORDBREAKS) to treat as ordinary +# characters. +# +# This is roughly equivalent to going back in time and setting +# COMP_WORDBREAKS to exclude those characters. The intent is to +# make option types like --date= and : easy to +# recognize by treating each shell word as a single token. +# +# It is best not to set COMP_WORDBREAKS directly because the value is +# shared with other completion scripts. By the time the completion +# function gets called, COMP_WORDS has already been populated so local +# changes to COMP_WORDBREAKS have no effect. +# +# Output: words_, cword_, cur_. + +__git_reassemble_comp_words_by_ref() +{ + local exclude i j first + # Which word separators to exclude? + exclude="${1//[^$COMP_WORDBREAKS]}" + cword_=$COMP_CWORD + if [ -z "$exclude" ]; then + words_=("${COMP_WORDS[@]}") + return + fi + # List of word completion separators has shrunk; + # re-assemble words to complete. + for ((i=0, j=0; i < ${#COMP_WORDS[@]}; i++, j++)); do + # Append each nonempty word consisting of just + # word separator characters to the current word. + first=t + while + [ $i -gt 0 ] && + [ -n "${COMP_WORDS[$i]}" ] && + # word consists of excluded word separators + [ "${COMP_WORDS[$i]//[^$exclude]}" = "${COMP_WORDS[$i]}" ] + do + # Attach to the previous token, + # unless the previous token is the command name. + if [ $j -ge 2 ] && [ -n "$first" ]; then + ((j--)) + fi + first= + words_[$j]=${words_[j]}${COMP_WORDS[i]} + if [ $i = $COMP_CWORD ]; then + cword_=$j + fi + if (($i < ${#COMP_WORDS[@]} - 1)); then + ((i++)) + else + # Done. 
+ return + fi + done + words_[$j]=${words_[j]}${COMP_WORDS[i]} + if [ $i = $COMP_CWORD ]; then + cword_=$j + fi + done +} + +if ! type _get_comp_words_by_ref >/dev/null 2>&1; then +_get_comp_words_by_ref () +{ + local exclude cur_ words_ cword_ + if [ "$1" = "-n" ]; then + exclude=$2 + shift 2 + fi + __git_reassemble_comp_words_by_ref "$exclude" + cur_=${words_[cword_]} + while [ $# -gt 0 ]; do + case "$1" in + cur) + cur=$cur_ + ;; + prev) + prev=${words_[$cword_-1]} + ;; + words) + words=("${words_[@]}") + ;; + cword) + cword=$cword_ + ;; + esac + shift + done +} +fi + +__gitcompadd () +{ + local i=0 + for x in $1; do + if [[ "$x" == "$3"* ]]; then + COMPREPLY[i++]="$2$x$4" + fi + done +} + +# Generates completion reply, appending a space to possible completion words, +# if necessary. +# It accepts 1 to 4 arguments: +# 1: List of possible completion words. +# 2: A prefix to be added to each possible completion word (optional). +# 3: Generate possible completion matches for this word (optional). +# 4: A suffix to be appended to each possible completion word (optional). +__gitcomp () +{ + local cur_="${3-$cur}" + + case "$cur_" in + --*=) + ;; + *) + local c i=0 IFS=$' \t\n' + for c in $1; do + c="$c${4-}" + if [[ $c == "$cur_"* ]]; then + case $c in + --*=*|*.) ;; + *) c="$c " ;; + esac + COMPREPLY[i++]="${2-}$c" + fi + done + ;; + esac +} + +# Generates completion reply from newline-separated possible completion words +# by appending a space to all of them. +# It accepts 1 to 4 arguments: +# 1: List of possible completion words, separated by a single newline. +# 2: A prefix to be added to each possible completion word (optional). +# 3: Generate possible completion matches for this word (optional). +# 4: A suffix to be appended to each possible completion word instead of +# the default space (optional). If specified but empty, nothing is +# appended. +__gitcomp_nl () +{ + local IFS=$'\n' + __gitcompadd "$1" "${2-}" "${3-$cur}" "${4- }" +} + +# Generates completion reply with compgen from newline-separated possible +# completion filenames. +# It accepts 1 to 3 arguments: +# 1: List of possible completion filenames, separated by a single newline. +# 2: A directory prefix to be added to each possible completion filename +# (optional). +# 3: Generate possible completion matches for this word (optional). +__gitcomp_file () +{ + local IFS=$'\n' + + # XXX does not work when the directory prefix contains a tilde, + # since tilde expansion is not applied. + # This means that COMPREPLY will be empty and Bash default + # completion will be used. + __gitcompadd "$1" "${2-}" "${3-$cur}" "" + + # use a hack to enable file mode in bash < 4 + compopt -o filenames +o nospace 2>/dev/null || + compgen -f /non-existing-dir/ > /dev/null +} + +# Execute 'git ls-files', unless the --committable option is specified, in +# which case it runs 'git diff-index' to find out the files that can be +# committed. It return paths relative to the directory specified in the first +# argument, and using the options specified in the second argument. +__git_ls_files_helper () +{ + ( + test -n "${CDPATH+set}" && unset CDPATH + cd "$1" + if [ "$2" == "--committable" ]; then + git diff-index --name-only --relative HEAD + else + # NOTE: $2 is not quoted in order to support multiple options + git ls-files --exclude-standard $2 + fi + ) 2>/dev/null +} + + +# __git_index_files accepts 1 or 2 arguments: +# 1: Options to pass to ls-files (required). +# 2: A directory path (optional). 
+# If provided, only files within the specified directory are listed. +# Sub directories are never recursed. Path must have a trailing +# slash. +__git_index_files () +{ + local dir="$(__gitdir)" root="${2-.}" file + + if [ -d "$dir" ]; then + __git_ls_files_helper "$root" "$1" | + while read -r file; do + case "$file" in + ?*/*) echo "${file%%/*}" ;; + *) echo "$file" ;; + esac + done | sort | uniq + fi +} + +__git_heads () +{ + local dir="$(__gitdir)" + if [ -d "$dir" ]; then + git --git-dir="$dir" for-each-ref --format='%(refname:short)' \ + refs/heads + return + fi +} + +__git_tags () +{ + local dir="$(__gitdir)" + if [ -d "$dir" ]; then + git --git-dir="$dir" for-each-ref --format='%(refname:short)' \ + refs/tags + return + fi +} + +# __git_refs accepts 0, 1 (to pass to __gitdir), or 2 arguments +# presence of 2nd argument means use the guess heuristic employed +# by checkout for tracking branches +__git_refs () +{ + local i hash dir="$(__gitdir "${1-}")" track="${2-}" + local format refs + if [ -d "$dir" ]; then + case "$cur" in + refs|refs/*) + format="refname" + refs="${cur%/*}" + track="" + ;; + *) + for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do + if [ -e "$dir/$i" ]; then echo $i; fi + done + format="refname:short" + refs="refs/tags refs/heads refs/remotes" + ;; + esac + git --git-dir="$dir" for-each-ref --format="%($format)" \ + $refs + if [ -n "$track" ]; then + # employ the heuristic used by git checkout + # Try to find a remote branch that matches the completion word + # but only output if the branch name is unique + local ref entry + git --git-dir="$dir" for-each-ref --shell --format="ref=%(refname:short)" \ + "refs/remotes/" | \ + while read -r entry; do + eval "$entry" + ref="${ref#*/}" + if [[ "$ref" == "$cur"* ]]; then + echo "$ref" + fi + done | sort | uniq -u + fi + return + fi + case "$cur" in + refs|refs/*) + git ls-remote "$dir" "$cur*" 2>/dev/null | \ + while read -r hash i; do + case "$i" in + *^{}) ;; + *) echo "$i" ;; + esac + done + ;; + *) + echo "HEAD" + git for-each-ref --format="%(refname:short)" -- "refs/remotes/$dir/" | sed -e "s#^$dir/##" + ;; + esac +} + +# __git_refs2 requires 1 argument (to pass to __git_refs) +__git_refs2 () +{ + local i + for i in $(__git_refs "$1"); do + echo "$i:$i" + done +} + +# __git_refs_remotes requires 1 argument (to pass to ls-remote) +__git_refs_remotes () +{ + local i hash + git ls-remote "$1" 'refs/heads/*' 2>/dev/null | \ + while read -r hash i; do + echo "$i:refs/remotes/$1/${i#refs/heads/}" + done +} + +__git_remotes () +{ + local i IFS=$'\n' d="$(__gitdir)" + test -d "$d/remotes" && ls -1 "$d/remotes" + for i in $(git --git-dir="$d" config --get-regexp 'remote\..*\.url' 2>/dev/null); do + i="${i#remote.}" + echo "${i/.url*/}" + done +} + +__git_list_merge_strategies () +{ + git merge -s help 2>&1 | + sed -n -e '/[Aa]vailable strategies are: /,/^$/{ + s/\.$// + s/.*:// + s/^[ ]*// + s/[ ]*$// + p + }' +} + +__git_merge_strategies= +# 'git merge -s help' (and thus detection of the merge strategy +# list) fails, unfortunately, if run outside of any git working +# tree. __git_merge_strategies is set to the empty string in +# that case, and the detection will be repeated the next time it +# is needed. 
+__git_compute_merge_strategies () +{ + test -n "$__git_merge_strategies" || + __git_merge_strategies=$(__git_list_merge_strategies) +} + +__git_complete_revlist_file () +{ + local pfx ls ref cur_="$cur" + case "$cur_" in + *..?*:*) + return + ;; + ?*:*) + ref="${cur_%%:*}" + cur_="${cur_#*:}" + case "$cur_" in + ?*/*) + pfx="${cur_%/*}" + cur_="${cur_##*/}" + ls="$ref:$pfx" + pfx="$pfx/" + ;; + *) + ls="$ref" + ;; + esac + + case "$COMP_WORDBREAKS" in + *:*) : great ;; + *) pfx="$ref:$pfx" ;; + esac + + __gitcomp_nl "$(git --git-dir="$(__gitdir)" ls-tree "$ls" 2>/dev/null \ + | sed '/^100... blob /{ + s,^.* ,, + s,$, , + } + /^120000 blob /{ + s,^.* ,, + s,$, , + } + /^040000 tree /{ + s,^.* ,, + s,$,/, + } + s/^.* //')" \ + "$pfx" "$cur_" "" + ;; + *...*) + pfx="${cur_%...*}..." + cur_="${cur_#*...}" + __gitcomp_nl "$(__git_refs)" "$pfx" "$cur_" + ;; + *..*) + pfx="${cur_%..*}.." + cur_="${cur_#*..}" + __gitcomp_nl "$(__git_refs)" "$pfx" "$cur_" + ;; + *) + __gitcomp_nl "$(__git_refs)" + ;; + esac +} + + +# __git_complete_index_file requires 1 argument: +# 1: the options to pass to ls-file +# +# The exception is --committable, which finds the files appropriate commit. +__git_complete_index_file () +{ + local pfx="" cur_="$cur" + + case "$cur_" in + ?*/*) + pfx="${cur_%/*}" + cur_="${cur_##*/}" + pfx="${pfx}/" + ;; + esac + + __gitcomp_file "$(__git_index_files "$1" "$pfx")" "$pfx" "$cur_" +} + +__git_complete_file () +{ + __git_complete_revlist_file +} + +__git_complete_revlist () +{ + __git_complete_revlist_file +} + +__git_complete_remote_or_refspec () +{ + local cur_="$cur" cmd="${words[1]}" + local i c=2 remote="" pfx="" lhs=1 no_complete_refspec=0 + if [ "$cmd" = "remote" ]; then + ((c++)) + fi + while [ $c -lt $cword ]; do + i="${words[c]}" + case "$i" in + --mirror) [ "$cmd" = "push" ] && no_complete_refspec=1 ;; + --all) + case "$cmd" in + push) no_complete_refspec=1 ;; + fetch) + return + ;; + *) ;; + esac + ;; + -*) ;; + *) remote="$i"; break ;; + esac + ((c++)) + done + if [ -z "$remote" ]; then + __gitcomp_nl "$(__git_remotes)" + return + fi + if [ $no_complete_refspec = 1 ]; then + return + fi + [ "$remote" = "." 
] && remote= + case "$cur_" in + *:*) + case "$COMP_WORDBREAKS" in + *:*) : great ;; + *) pfx="${cur_%%:*}:" ;; + esac + cur_="${cur_#*:}" + lhs=0 + ;; + +*) + pfx="+" + cur_="${cur_#+}" + ;; + esac + case "$cmd" in + fetch) + if [ $lhs = 1 ]; then + __gitcomp_nl "$(__git_refs2 "$remote")" "$pfx" "$cur_" + else + __gitcomp_nl "$(__git_refs)" "$pfx" "$cur_" + fi + ;; + pull|remote) + if [ $lhs = 1 ]; then + __gitcomp_nl "$(__git_refs "$remote")" "$pfx" "$cur_" + else + __gitcomp_nl "$(__git_refs)" "$pfx" "$cur_" + fi + ;; + push) + if [ $lhs = 1 ]; then + __gitcomp_nl "$(__git_refs)" "$pfx" "$cur_" + else + __gitcomp_nl "$(__git_refs "$remote")" "$pfx" "$cur_" + fi + ;; + esac +} + +__git_complete_strategy () +{ + __git_compute_merge_strategies + case "$prev" in + -s|--strategy) + __gitcomp "$__git_merge_strategies" + return 0 + esac + case "$cur" in + --strategy=*) + __gitcomp "$__git_merge_strategies" "" "${cur##--strategy=}" + return 0 + ;; + esac + return 1 +} + +__git_commands () { + if test -n "${GIT_TESTING_COMMAND_COMPLETION:-}" + then + printf "%s" "${GIT_TESTING_COMMAND_COMPLETION}" + else + git help -a|egrep '^ [a-zA-Z0-9]' + fi +} + +__git_list_all_commands () +{ + local i IFS=" "$'\n' + for i in $(__git_commands) + do + case $i in + *--*) : helper pattern;; + *) echo $i;; + esac + done +} + +__git_all_commands= +__git_compute_all_commands () +{ + test -n "$__git_all_commands" || + __git_all_commands=$(__git_list_all_commands) +} + +__git_list_porcelain_commands () +{ + local i IFS=" "$'\n' + __git_compute_all_commands + for i in $__git_all_commands + do + case $i in + *--*) : helper pattern;; + applymbox) : ask gittus;; + applypatch) : ask gittus;; + archimport) : import;; + cat-file) : plumbing;; + check-attr) : plumbing;; + check-ignore) : plumbing;; + check-mailmap) : plumbing;; + check-ref-format) : plumbing;; + checkout-index) : plumbing;; + commit-tree) : plumbing;; + count-objects) : infrequent;; + credential-cache) : credentials helper;; + credential-store) : credentials helper;; + cvsexportcommit) : export;; + cvsimport) : import;; + cvsserver) : daemon;; + daemon) : daemon;; + diff-files) : plumbing;; + diff-index) : plumbing;; + diff-tree) : plumbing;; + fast-import) : import;; + fast-export) : export;; + fsck-objects) : plumbing;; + fetch-pack) : plumbing;; + fmt-merge-msg) : plumbing;; + for-each-ref) : plumbing;; + hash-object) : plumbing;; + http-*) : transport;; + index-pack) : plumbing;; + init-db) : deprecated;; + local-fetch) : plumbing;; + lost-found) : infrequent;; + ls-files) : plumbing;; + ls-remote) : plumbing;; + ls-tree) : plumbing;; + mailinfo) : plumbing;; + mailsplit) : plumbing;; + merge-*) : plumbing;; + mktree) : plumbing;; + mktag) : plumbing;; + pack-objects) : plumbing;; + pack-redundant) : plumbing;; + pack-refs) : plumbing;; + parse-remote) : plumbing;; + patch-id) : plumbing;; + peek-remote) : plumbing;; + prune) : plumbing;; + prune-packed) : plumbing;; + quiltimport) : import;; + read-tree) : plumbing;; + receive-pack) : plumbing;; + remote-*) : transport;; + repo-config) : deprecated;; + rerere) : plumbing;; + rev-list) : plumbing;; + rev-parse) : plumbing;; + runstatus) : plumbing;; + sh-setup) : internal;; + shell) : daemon;; + show-ref) : plumbing;; + send-pack) : plumbing;; + show-index) : plumbing;; + ssh-*) : transport;; + stripspace) : plumbing;; + symbolic-ref) : plumbing;; + tar-tree) : deprecated;; + unpack-file) : plumbing;; + unpack-objects) : plumbing;; + update-index) : plumbing;; + update-ref) : plumbing;; + 
update-server-info) : daemon;; + upload-archive) : plumbing;; + upload-pack) : plumbing;; + write-tree) : plumbing;; + var) : infrequent;; + verify-pack) : infrequent;; + verify-tag) : plumbing;; + *) echo $i;; + esac + done +} + +__git_porcelain_commands= +__git_compute_porcelain_commands () +{ + __git_compute_all_commands + test -n "$__git_porcelain_commands" || + __git_porcelain_commands=$(__git_list_porcelain_commands) +} + +__git_pretty_aliases () +{ + local i IFS=$'\n' + for i in $(git --git-dir="$(__gitdir)" config --get-regexp "pretty\..*" 2>/dev/null); do + case "$i" in + pretty.*) + i="${i#pretty.}" + echo "${i/ */}" + ;; + esac + done +} + +__git_aliases () +{ + local i IFS=$'\n' + for i in $(git --git-dir="$(__gitdir)" config --get-regexp "alias\..*" 2>/dev/null); do + case "$i" in + alias.*) + i="${i#alias.}" + echo "${i/ */}" + ;; + esac + done +} + +# __git_aliased_command requires 1 argument +__git_aliased_command () +{ + local word cmdline=$(git --git-dir="$(__gitdir)" \ + config --get "alias.$1") + for word in $cmdline; do + case "$word" in + \!gitk|gitk) + echo "gitk" + return + ;; + \!*) : shell command alias ;; + -*) : option ;; + *=*) : setting env ;; + git) : git itself ;; + *) + echo "$word" + return + esac + done +} + +# __git_find_on_cmdline requires 1 argument +__git_find_on_cmdline () +{ + local word subcommand c=1 + while [ $c -lt $cword ]; do + word="${words[c]}" + for subcommand in $1; do + if [ "$subcommand" = "$word" ]; then + echo "$subcommand" + return + fi + done + ((c++)) + done +} + +__git_has_doubledash () +{ + local c=1 + while [ $c -lt $cword ]; do + if [ "--" = "${words[c]}" ]; then + return 0 + fi + ((c++)) + done + return 1 +} + +# Try to count non option arguments passed on the command line for the +# specified git command. +# When options are used, it is necessary to use the special -- option to +# tell the implementation were non option arguments begin. +# XXX this can not be improved, since options can appear everywhere, as +# an example: +# git mv x -n y +# +# __git_count_arguments requires 1 argument: the git command executed. +__git_count_arguments () +{ + local word i c=0 + + # Skip "git" (first argument) + for ((i=1; i < ${#words[@]}; i++)); do + word="${words[i]}" + + case "$word" in + --) + # Good; we can assume that the following are only non + # option arguments. 
+ ((c = 0)) + ;; + "$1") + # Skip the specified git command and discard git + # main options + ((c = 0)) + ;; + ?*) + ((c++)) + ;; + esac + done + + printf "%d" $c +} + +__git_whitespacelist="nowarn warn error error-all fix" + +_git_am () +{ + local dir="$(__gitdir)" + if [ -d "$dir"/rebase-apply ]; then + __gitcomp "--skip --continue --resolved --abort" + return + fi + case "$cur" in + --whitespace=*) + __gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}" + return + ;; + --*) + __gitcomp " + --3way --committer-date-is-author-date --ignore-date + --ignore-whitespace --ignore-space-change + --interactive --keep --no-utf8 --signoff --utf8 + --whitespace= --scissors + " + return + esac +} + +_git_apply () +{ + case "$cur" in + --whitespace=*) + __gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}" + return + ;; + --*) + __gitcomp " + --stat --numstat --summary --check --index + --cached --index-info --reverse --reject --unidiff-zero + --apply --no-add --exclude= + --ignore-whitespace --ignore-space-change + --whitespace= --inaccurate-eof --verbose + " + return + esac +} + +_git_add () +{ + case "$cur" in + --*) + __gitcomp " + --interactive --refresh --patch --update --dry-run + --ignore-errors --intent-to-add + " + return + esac + + # XXX should we check for --update and --all options ? + __git_complete_index_file "--others --modified" +} + +_git_archive () +{ + case "$cur" in + --format=*) + __gitcomp "$(git archive --list)" "" "${cur##--format=}" + return + ;; + --remote=*) + __gitcomp_nl "$(__git_remotes)" "" "${cur##--remote=}" + return + ;; + --*) + __gitcomp " + --format= --list --verbose + --prefix= --remote= --exec= + " + return + ;; + esac + __git_complete_file +} + +_git_bisect () +{ + __git_has_doubledash && return + + local subcommands="start bad good skip reset visualize replay log run" + local subcommand="$(__git_find_on_cmdline "$subcommands")" + if [ -z "$subcommand" ]; then + if [ -f "$(__gitdir)"/BISECT_START ]; then + __gitcomp "$subcommands" + else + __gitcomp "replay start" + fi + return + fi + + case "$subcommand" in + bad|good|reset|skip|start) + __gitcomp_nl "$(__git_refs)" + ;; + *) + ;; + esac +} + +_git_branch () +{ + local i c=1 only_local_ref="n" has_r="n" + + while [ $c -lt $cword ]; do + i="${words[c]}" + case "$i" in + -d|-m) only_local_ref="y" ;; + -r) has_r="y" ;; + esac + ((c++)) + done + + case "$cur" in + --set-upstream-to=*) + __gitcomp "$(__git_refs)" "" "${cur##--set-upstream-to=}" + ;; + --*) + __gitcomp " + --color --no-color --verbose --abbrev= --no-abbrev + --track --no-track --contains --merged --no-merged + --set-upstream-to= --edit-description --list + --unset-upstream + " + ;; + *) + if [ $only_local_ref = "y" -a $has_r = "n" ]; then + __gitcomp_nl "$(__git_heads)" + else + __gitcomp_nl "$(__git_refs)" + fi + ;; + esac +} + +_git_bundle () +{ + local cmd="${words[2]}" + case "$cword" in + 2) + __gitcomp "create list-heads verify unbundle" + ;; + 3) + # looking for a file + ;; + *) + case "$cmd" in + create) + __git_complete_revlist + ;; + esac + ;; + esac +} + +_git_checkout () +{ + __git_has_doubledash && return + + case "$cur" in + --conflict=*) + __gitcomp "diff3 merge" "" "${cur##--conflict=}" + ;; + --*) + __gitcomp " + --quiet --ours --theirs --track --no-track --merge + --conflict= --orphan --patch + " + ;; + *) + # check if --track, --no-track, or --no-guess was specified + # if so, disable DWIM mode + local flags="--track --no-track --no-guess" track=1 + if [ -n "$(__git_find_on_cmdline "$flags")" ]; then + track='' + fi 
+ __gitcomp_nl "$(__git_refs '' $track)" + ;; + esac +} + +_git_cherry () +{ + __gitcomp "$(__git_refs)" +} + +_git_cherry_pick () +{ + local dir="$(__gitdir)" + if [ -f "$dir"/CHERRY_PICK_HEAD ]; then + __gitcomp "--continue --quit --abort" + return + fi + case "$cur" in + --*) + __gitcomp "--edit --no-commit --signoff --strategy= --mainline" + ;; + *) + __gitcomp_nl "$(__git_refs)" + ;; + esac +} + +_git_clean () +{ + case "$cur" in + --*) + __gitcomp "--dry-run --quiet" + return + ;; + esac + + # XXX should we check for -x option ? + __git_complete_index_file "--others" +} + +_git_clone () +{ + case "$cur" in + --*) + __gitcomp " + --local + --no-hardlinks + --shared + --reference + --quiet + --no-checkout + --bare + --mirror + --origin + --upload-pack + --template= + --depth + --single-branch + --branch + " + return + ;; + esac +} + +_git_commit () +{ + case "$prev" in + -c|-C) + __gitcomp_nl "$(__git_refs)" "" "${cur}" + return + ;; + esac + + case "$cur" in + --cleanup=*) + __gitcomp "default strip verbatim whitespace + " "" "${cur##--cleanup=}" + return + ;; + --reuse-message=*|--reedit-message=*|\ + --fixup=*|--squash=*) + __gitcomp_nl "$(__git_refs)" "" "${cur#*=}" + return + ;; + --untracked-files=*) + __gitcomp "all no normal" "" "${cur##--untracked-files=}" + return + ;; + --*) + __gitcomp " + --all --author= --signoff --verify --no-verify + --edit --no-edit + --amend --include --only --interactive + --dry-run --reuse-message= --reedit-message= + --reset-author --file= --message= --template= + --cleanup= --untracked-files --untracked-files= + --verbose --quiet --fixup= --squash= + " + return + esac + + if git rev-parse --verify --quiet HEAD >/dev/null; then + __git_complete_index_file "--committable" + else + # This is the first commit + __git_complete_index_file "--cached" + fi +} + +_git_describe () +{ + case "$cur" in + --*) + __gitcomp " + --all --tags --contains --abbrev= --candidates= + --exact-match --debug --long --match --always + " + return + esac + __gitcomp_nl "$(__git_refs)" +} + +__git_diff_algorithms="myers minimal patience histogram" + +__git_diff_common_options="--stat --numstat --shortstat --summary + --patch-with-stat --name-only --name-status --color + --no-color --color-words --no-renames --check + --full-index --binary --abbrev --diff-filter= + --find-copies-harder + --text --ignore-space-at-eol --ignore-space-change + --ignore-all-space --exit-code --quiet --ext-diff + --no-ext-diff + --no-prefix --src-prefix= --dst-prefix= + --inter-hunk-context= + --patience --histogram --minimal + --raw --word-diff + --dirstat --dirstat= --dirstat-by-file + --dirstat-by-file= --cumulative + --diff-algorithm= +" + +_git_diff () +{ + __git_has_doubledash && return + + case "$cur" in + --diff-algorithm=*) + __gitcomp "$__git_diff_algorithms" "" "${cur##--diff-algorithm=}" + return + ;; + --*) + __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex + --base --ours --theirs --no-index + $__git_diff_common_options + " + return + ;; + esac + __git_complete_revlist_file +} + +__git_mergetools_common="diffuse ecmerge emerge kdiff3 meld opendiff + tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc3 codecompare +" + +_git_difftool () +{ + __git_has_doubledash && return + + case "$cur" in + --tool=*) + __gitcomp "$__git_mergetools_common kompare" "" "${cur##--tool=}" + return + ;; + --*) + __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex + --base --ours --theirs + --no-renames --diff-filter= --find-copies-harder + --relative --ignore-submodules + --tool=" + return + 
;; + esac + __git_complete_revlist_file +} + +__git_fetch_options=" + --quiet --verbose --append --upload-pack --force --keep --depth= + --tags --no-tags --all --prune --dry-run +" + +_git_fetch () +{ + case "$cur" in + --*) + __gitcomp "$__git_fetch_options" + return + ;; + esac + __git_complete_remote_or_refspec +} + +__git_format_patch_options=" + --stdout --attach --no-attach --thread --thread= --no-thread + --numbered --start-number --numbered-files --keep-subject --signoff + --signature --no-signature --in-reply-to= --cc= --full-index --binary + --not --all --cover-letter --no-prefix --src-prefix= --dst-prefix= + --inline --suffix= --ignore-if-in-upstream --subject-prefix= + --output-directory --reroll-count --to= --quiet --notes +" + +_git_format_patch () +{ + case "$cur" in + --thread=*) + __gitcomp " + deep shallow + " "" "${cur##--thread=}" + return + ;; + --*) + __gitcomp "$__git_format_patch_options" + return + ;; + esac + __git_complete_revlist +} + +_git_fsck () +{ + case "$cur" in + --*) + __gitcomp " + --tags --root --unreachable --cache --no-reflogs --full + --strict --verbose --lost-found + " + return + ;; + esac +} + +_git_gc () +{ + case "$cur" in + --*) + __gitcomp "--prune --aggressive" + return + ;; + esac +} + +_git_gitk () +{ + _gitk +} + +__git_match_ctag() { + awk "/^${1////\\/}/ { print \$1 }" "$2" +} + +_git_grep () +{ + __git_has_doubledash && return + + case "$cur" in + --*) + __gitcomp " + --cached + --text --ignore-case --word-regexp --invert-match + --full-name --line-number + --extended-regexp --basic-regexp --fixed-strings + --perl-regexp + --files-with-matches --name-only + --files-without-match + --max-depth + --count + --and --or --not --all-match + " + return + ;; + esac + + case "$cword,$prev" in + 2,*|*,-*) + if test -r tags; then + __gitcomp_nl "$(__git_match_ctag "$cur" tags)" + return + fi + ;; + esac + + __gitcomp_nl "$(__git_refs)" +} + +_git_help () +{ + case "$cur" in + --*) + __gitcomp "--all --info --man --web" + return + ;; + esac + __git_compute_all_commands + __gitcomp "$__git_all_commands $(__git_aliases) + attributes cli core-tutorial cvs-migration + diffcore gitk glossary hooks ignore modules + namespaces repository-layout tutorial tutorial-2 + workflows + " +} + +_git_init () +{ + case "$cur" in + --shared=*) + __gitcomp " + false true umask group all world everybody + " "" "${cur##--shared=}" + return + ;; + --*) + __gitcomp "--quiet --bare --template= --shared --shared=" + return + ;; + esac +} + +_git_ls_files () +{ + case "$cur" in + --*) + __gitcomp "--cached --deleted --modified --others --ignored + --stage --directory --no-empty-directory --unmerged + --killed --exclude= --exclude-from= + --exclude-per-directory= --exclude-standard + --error-unmatch --with-tree= --full-name + --abbrev --ignored --exclude-per-directory + " + return + ;; + esac + + # XXX ignore options like --modified and always suggest all cached + # files. 
+ __git_complete_index_file "--cached" +} + +_git_ls_remote () +{ + __gitcomp_nl "$(__git_remotes)" +} + +_git_ls_tree () +{ + __git_complete_file +} + +# Options that go well for log, shortlog and gitk +__git_log_common_options=" + --not --all + --branches --tags --remotes + --first-parent --merges --no-merges + --max-count= + --max-age= --since= --after= + --min-age= --until= --before= + --min-parents= --max-parents= + --no-min-parents --no-max-parents +" +# Options that go well for log and gitk (not shortlog) +__git_log_gitk_options=" + --dense --sparse --full-history + --simplify-merges --simplify-by-decoration + --left-right --notes --no-notes +" +# Options that go well for log and shortlog (not gitk) +__git_log_shortlog_options=" + --author= --committer= --grep= + --all-match +" + +__git_log_pretty_formats="oneline short medium full fuller email raw format:" +__git_log_date_formats="relative iso8601 rfc2822 short local default raw" + +_git_log () +{ + __git_has_doubledash && return + + local g="$(git rev-parse --git-dir 2>/dev/null)" + local merge="" + if [ -f "$g/MERGE_HEAD" ]; then + merge="--merge" + fi + case "$cur" in + --pretty=*|--format=*) + __gitcomp "$__git_log_pretty_formats $(__git_pretty_aliases) + " "" "${cur#*=}" + return + ;; + --date=*) + __gitcomp "$__git_log_date_formats" "" "${cur##--date=}" + return + ;; + --decorate=*) + __gitcomp "long short" "" "${cur##--decorate=}" + return + ;; + --*) + __gitcomp " + $__git_log_common_options + $__git_log_shortlog_options + $__git_log_gitk_options + --root --topo-order --date-order --reverse + --follow --full-diff + --abbrev-commit --abbrev= + --relative-date --date= + --pretty= --format= --oneline + --cherry-pick + --graph + --decorate --decorate= + --walk-reflogs + --parents --children + $merge + $__git_diff_common_options + --pickaxe-all --pickaxe-regex + " + return + ;; + esac + __git_complete_revlist +} + +__git_merge_options=" + --no-commit --no-stat --log --no-log --squash --strategy + --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit +" + +_git_merge () +{ + __git_complete_strategy && return + + case "$cur" in + --*) + __gitcomp "$__git_merge_options" + return + esac + __gitcomp_nl "$(__git_refs)" +} + +_git_mergetool () +{ + case "$cur" in + --tool=*) + __gitcomp "$__git_mergetools_common tortoisemerge" "" "${cur##--tool=}" + return + ;; + --*) + __gitcomp "--tool=" + return + ;; + esac +} + +_git_merge_base () +{ + __gitcomp_nl "$(__git_refs)" +} + +_git_mv () +{ + case "$cur" in + --*) + __gitcomp "--dry-run" + return + ;; + esac + + if [ $(__git_count_arguments "mv") -gt 0 ]; then + # We need to show both cached and untracked files (including + # empty directories) since this may not be the last argument. 
+ __git_complete_index_file "--cached --others --directory" + else + __git_complete_index_file "--cached" + fi +} + +_git_name_rev () +{ + __gitcomp "--tags --all --stdin" +} + +_git_notes () +{ + local subcommands='add append copy edit list prune remove show' + local subcommand="$(__git_find_on_cmdline "$subcommands")" + + case "$subcommand,$cur" in + ,--*) + __gitcomp '--ref' + ;; + ,*) + case "$prev" in + --ref) + __gitcomp_nl "$(__git_refs)" + ;; + *) + __gitcomp "$subcommands --ref" + ;; + esac + ;; + add,--reuse-message=*|append,--reuse-message=*|\ + add,--reedit-message=*|append,--reedit-message=*) + __gitcomp_nl "$(__git_refs)" "" "${cur#*=}" + ;; + add,--*|append,--*) + __gitcomp '--file= --message= --reedit-message= + --reuse-message=' + ;; + copy,--*) + __gitcomp '--stdin' + ;; + prune,--*) + __gitcomp '--dry-run --verbose' + ;; + prune,*) + ;; + *) + case "$prev" in + -m|-F) + ;; + *) + __gitcomp_nl "$(__git_refs)" + ;; + esac + ;; + esac +} + +_git_pull () +{ + __git_complete_strategy && return + + case "$cur" in + --*) + __gitcomp " + --rebase --no-rebase + $__git_merge_options + $__git_fetch_options + " + return + ;; + esac + __git_complete_remote_or_refspec +} + +_git_push () +{ + case "$prev" in + --repo) + __gitcomp_nl "$(__git_remotes)" + return + esac + case "$cur" in + --repo=*) + __gitcomp_nl "$(__git_remotes)" "" "${cur##--repo=}" + return + ;; + --*) + __gitcomp " + --all --mirror --tags --dry-run --force --verbose + --receive-pack= --repo= --set-upstream + " + return + ;; + esac + __git_complete_remote_or_refspec +} + +_git_rebase () +{ + local dir="$(__gitdir)" + if [ -d "$dir"/rebase-apply ] || [ -d "$dir"/rebase-merge ]; then + __gitcomp "--continue --skip --abort" + return + fi + __git_complete_strategy && return + case "$cur" in + --whitespace=*) + __gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}" + return + ;; + --*) + __gitcomp " + --onto --merge --strategy --interactive + --preserve-merges --stat --no-stat + --committer-date-is-author-date --ignore-date + --ignore-whitespace --whitespace= + --autosquash + " + + return + esac + __gitcomp_nl "$(__git_refs)" +} + +_git_reflog () +{ + local subcommands="show delete expire" + local subcommand="$(__git_find_on_cmdline "$subcommands")" + + if [ -z "$subcommand" ]; then + __gitcomp "$subcommands" + else + __gitcomp_nl "$(__git_refs)" + fi +} + +__git_send_email_confirm_options="always never auto cc compose" +__git_send_email_suppresscc_options="author self cc bodycc sob cccmd body all" + +_git_send_email () +{ + case "$cur" in + --confirm=*) + __gitcomp " + $__git_send_email_confirm_options + " "" "${cur##--confirm=}" + return + ;; + --suppress-cc=*) + __gitcomp " + $__git_send_email_suppresscc_options + " "" "${cur##--suppress-cc=}" + + return + ;; + --smtp-encryption=*) + __gitcomp "ssl tls" "" "${cur##--smtp-encryption=}" + return + ;; + --thread=*) + __gitcomp " + deep shallow + " "" "${cur##--thread=}" + return + ;; + --*) + __gitcomp "--annotate --bcc --cc --cc-cmd --chain-reply-to + --compose --confirm= --dry-run --envelope-sender + --from --identity + --in-reply-to --no-chain-reply-to --no-signed-off-by-cc + --no-suppress-from --no-thread --quiet + --signed-off-by-cc --smtp-pass --smtp-server + --smtp-server-port --smtp-encryption= --smtp-user + --subject --suppress-cc= --suppress-from --thread --to + --validate --no-validate + $__git_format_patch_options" + return + ;; + esac + __git_complete_revlist +} + +_git_stage () +{ + _git_add +} + +__git_config_get_set_variables () +{ + local prevword 
word config_file= c=$cword + while [ $c -gt 1 ]; do + word="${words[c]}" + case "$word" in + --system|--global|--local|--file=*) + config_file="$word" + break + ;; + -f|--file) + config_file="$word $prevword" + break + ;; + esac + prevword=$word + c=$((--c)) + done + + git --git-dir="$(__gitdir)" config $config_file --list 2>/dev/null | + while read -r line + do + case "$line" in + *.*=*) + echo "${line/=*/}" + ;; + esac + done +} + +_git_config () +{ + case "$prev" in + branch.*.remote|branch.*.pushremote) + __gitcomp_nl "$(__git_remotes)" + return + ;; + branch.*.merge) + __gitcomp_nl "$(__git_refs)" + return + ;; + branch.*.rebase) + __gitcomp "false true" + return + ;; + remote.pushdefault) + __gitcomp_nl "$(__git_remotes)" + return + ;; + remote.*.fetch) + local remote="${prev#remote.}" + remote="${remote%.fetch}" + if [ -z "$cur" ]; then + __gitcomp_nl "refs/heads/" "" "" "" + return + fi + __gitcomp_nl "$(__git_refs_remotes "$remote")" + return + ;; + remote.*.push) + local remote="${prev#remote.}" + remote="${remote%.push}" + __gitcomp_nl "$(git --git-dir="$(__gitdir)" \ + for-each-ref --format='%(refname):%(refname)' \ + refs/heads)" + return + ;; + pull.twohead|pull.octopus) + __git_compute_merge_strategies + __gitcomp "$__git_merge_strategies" + return + ;; + color.branch|color.diff|color.interactive|\ + color.showbranch|color.status|color.ui) + __gitcomp "always never auto" + return + ;; + color.pager) + __gitcomp "false true" + return + ;; + color.*.*) + __gitcomp " + normal black red green yellow blue magenta cyan white + bold dim ul blink reverse + " + return + ;; + diff.submodule) + __gitcomp "log short" + return + ;; + help.format) + __gitcomp "man info web html" + return + ;; + log.date) + __gitcomp "$__git_log_date_formats" + return + ;; + sendemail.aliasesfiletype) + __gitcomp "mutt mailrc pine elm gnus" + return + ;; + sendemail.confirm) + __gitcomp "$__git_send_email_confirm_options" + return + ;; + sendemail.suppresscc) + __gitcomp "$__git_send_email_suppresscc_options" + return + ;; + --get|--get-all|--unset|--unset-all) + __gitcomp_nl "$(__git_config_get_set_variables)" + return + ;; + *.*) + return + ;; + esac + case "$cur" in + --*) + __gitcomp " + --system --global --local --file= + --list --replace-all + --get --get-all --get-regexp + --add --unset --unset-all + --remove-section --rename-section + " + return + ;; + branch.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp "remote pushremote merge mergeoptions rebase" "$pfx" "$cur_" + return + ;; + branch.*) + local pfx="${cur%.*}." cur_="${cur#*.}" + __gitcomp_nl "$(__git_heads)" "$pfx" "$cur_" "." + return + ;; + guitool.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp " + argprompt cmd confirm needsfile noconsole norescan + prompt revprompt revunmerged title + " "$pfx" "$cur_" + return + ;; + difftool.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp "cmd path" "$pfx" "$cur_" + return + ;; + man.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp "cmd path" "$pfx" "$cur_" + return + ;; + mergetool.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp "cmd path trustExitCode" "$pfx" "$cur_" + return + ;; + pager.*) + local pfx="${cur%.*}." cur_="${cur#*.}" + __git_compute_all_commands + __gitcomp_nl "$__git_all_commands" "$pfx" "$cur_" + return + ;; + remote.*.*) + local pfx="${cur%.*}." 
cur_="${cur##*.}" + __gitcomp " + url proxy fetch push mirror skipDefaultUpdate + receivepack uploadpack tagopt pushurl + " "$pfx" "$cur_" + return + ;; + remote.*) + local pfx="${cur%.*}." cur_="${cur#*.}" + __gitcomp_nl "$(__git_remotes)" "$pfx" "$cur_" "." + return + ;; + url.*.*) + local pfx="${cur%.*}." cur_="${cur##*.}" + __gitcomp "insteadOf pushInsteadOf" "$pfx" "$cur_" + return + ;; + esac + __gitcomp " + add.ignoreErrors + advice.commitBeforeMerge + advice.detachedHead + advice.implicitIdentity + advice.pushNonFastForward + advice.resolveConflict + advice.statusHints + alias. + am.keepcr + apply.ignorewhitespace + apply.whitespace + branch.autosetupmerge + branch.autosetuprebase + browser. + clean.requireForce + color.branch + color.branch.current + color.branch.local + color.branch.plain + color.branch.remote + color.decorate.HEAD + color.decorate.branch + color.decorate.remoteBranch + color.decorate.stash + color.decorate.tag + color.diff + color.diff.commit + color.diff.frag + color.diff.func + color.diff.meta + color.diff.new + color.diff.old + color.diff.plain + color.diff.whitespace + color.grep + color.grep.context + color.grep.filename + color.grep.function + color.grep.linenumber + color.grep.match + color.grep.selected + color.grep.separator + color.interactive + color.interactive.error + color.interactive.header + color.interactive.help + color.interactive.prompt + color.pager + color.showbranch + color.status + color.status.added + color.status.changed + color.status.header + color.status.nobranch + color.status.untracked + color.status.updated + color.ui + commit.status + commit.template + core.abbrev + core.askpass + core.attributesfile + core.autocrlf + core.bare + core.bigFileThreshold + core.compression + core.createObject + core.deltaBaseCacheLimit + core.editor + core.eol + core.excludesfile + core.fileMode + core.fsyncobjectfiles + core.gitProxy + core.ignoreStat + core.ignorecase + core.logAllRefUpdates + core.loosecompression + core.notesRef + core.packedGitLimit + core.packedGitWindowSize + core.pager + core.preferSymlinkRefs + core.preloadindex + core.quotepath + core.repositoryFormatVersion + core.safecrlf + core.sharedRepository + core.sparseCheckout + core.symlinks + core.trustctime + core.warnAmbiguousRefs + core.whitespace + core.worktree + diff.autorefreshindex + diff.external + diff.ignoreSubmodules + diff.mnemonicprefix + diff.noprefix + diff.renameLimit + diff.renames + diff.statGraphWidth + diff.submodule + diff.suppressBlankEmpty + diff.tool + diff.wordRegex + diff.algorithm + difftool. + difftool.prompt + fetch.recurseSubmodules + fetch.unpackLimit + format.attach + format.cc + format.headers + format.numbered + format.pretty + format.signature + format.signoff + format.subjectprefix + format.suffix + format.thread + format.to + gc. + gc.aggressiveWindow + gc.auto + gc.autopacklimit + gc.packrefs + gc.pruneexpire + gc.reflogexpire + gc.reflogexpireunreachable + gc.rerereresolved + gc.rerereunresolved + gitcvs.allbinary + gitcvs.commitmsgannotation + gitcvs.dbTableNamePrefix + gitcvs.dbdriver + gitcvs.dbname + gitcvs.dbpass + gitcvs.dbuser + gitcvs.enabled + gitcvs.logfile + gitcvs.usecrlfattr + guitool. 
+ gui.blamehistoryctx + gui.commitmsgwidth + gui.copyblamethreshold + gui.diffcontext + gui.encoding + gui.fastcopyblame + gui.matchtrackingbranch + gui.newbranchtemplate + gui.pruneduringfetch + gui.spellingdictionary + gui.trustmtime + help.autocorrect + help.browser + help.format + http.lowSpeedLimit + http.lowSpeedTime + http.maxRequests + http.minSessions + http.noEPSV + http.postBuffer + http.proxy + http.sslCAInfo + http.sslCAPath + http.sslCert + http.sslCertPasswordProtected + http.sslKey + http.sslVerify + http.useragent + i18n.commitEncoding + i18n.logOutputEncoding + imap.authMethod + imap.folder + imap.host + imap.pass + imap.port + imap.preformattedHTML + imap.sslverify + imap.tunnel + imap.user + init.templatedir + instaweb.browser + instaweb.httpd + instaweb.local + instaweb.modulepath + instaweb.port + interactive.singlekey + log.date + log.decorate + log.showroot + mailmap.file + man. + man.viewer + merge. + merge.conflictstyle + merge.log + merge.renameLimit + merge.renormalize + merge.stat + merge.tool + merge.verbosity + mergetool. + mergetool.keepBackup + mergetool.keepTemporaries + mergetool.prompt + notes.displayRef + notes.rewrite. + notes.rewrite.amend + notes.rewrite.rebase + notes.rewriteMode + notes.rewriteRef + pack.compression + pack.deltaCacheLimit + pack.deltaCacheSize + pack.depth + pack.indexVersion + pack.packSizeLimit + pack.threads + pack.window + pack.windowMemory + pager. + pretty. + pull.octopus + pull.twohead + push.default + rebase.autosquash + rebase.stat + receive.autogc + receive.denyCurrentBranch + receive.denyDeleteCurrent + receive.denyDeletes + receive.denyNonFastForwards + receive.fsckObjects + receive.unpackLimit + receive.updateserverinfo + remote.pushdefault + remotes. + repack.usedeltabaseoffset + rerere.autoupdate + rerere.enabled + sendemail. + sendemail.aliasesfile + sendemail.aliasfiletype + sendemail.bcc + sendemail.cc + sendemail.cccmd + sendemail.chainreplyto + sendemail.confirm + sendemail.envelopesender + sendemail.from + sendemail.identity + sendemail.multiedit + sendemail.signedoffbycc + sendemail.smtpdomain + sendemail.smtpencryption + sendemail.smtppass + sendemail.smtpserver + sendemail.smtpserveroption + sendemail.smtpserverport + sendemail.smtpuser + sendemail.suppresscc + sendemail.suppressfrom + sendemail.thread + sendemail.to + sendemail.validate + showbranch.default + status.relativePaths + status.showUntrackedFiles + status.submodulesummary + submodule. + tar.umask + transfer.unpackLimit + url. + user.email + user.name + user.signingkey + web.browser + branch. remote. 
+ " +} + +_git_remote () +{ + local subcommands="add rename remove set-head set-branches set-url show prune update" + local subcommand="$(__git_find_on_cmdline "$subcommands")" + if [ -z "$subcommand" ]; then + __gitcomp "$subcommands" + return + fi + + case "$subcommand" in + rename|remove|set-url|show|prune) + __gitcomp_nl "$(__git_remotes)" + ;; + set-head|set-branches) + __git_complete_remote_or_refspec + ;; + update) + local i c='' IFS=$'\n' + for i in $(git --git-dir="$(__gitdir)" config --get-regexp "remotes\..*" 2>/dev/null); do + i="${i#remotes.}" + c="$c ${i/ */}" + done + __gitcomp "$c" + ;; + *) + ;; + esac +} + +_git_replace () +{ + __gitcomp_nl "$(__git_refs)" +} + +_git_reset () +{ + __git_has_doubledash && return + + case "$cur" in + --*) + __gitcomp "--merge --mixed --hard --soft --patch" + return + ;; + esac + __gitcomp_nl "$(__git_refs)" +} + +_git_revert () +{ + case "$cur" in + --*) + __gitcomp "--edit --mainline --no-edit --no-commit --signoff" + return + ;; + esac + __gitcomp_nl "$(__git_refs)" +} + +_git_rm () +{ + case "$cur" in + --*) + __gitcomp "--cached --dry-run --ignore-unmatch --quiet" + return + ;; + esac + + __git_complete_index_file "--cached" +} + +_git_shortlog () +{ + __git_has_doubledash && return + + case "$cur" in + --*) + __gitcomp " + $__git_log_common_options + $__git_log_shortlog_options + --numbered --summary + " + return + ;; + esac + __git_complete_revlist +} + +_git_show () +{ + __git_has_doubledash && return + + case "$cur" in + --pretty=*|--format=*) + __gitcomp "$__git_log_pretty_formats $(__git_pretty_aliases) + " "" "${cur#*=}" + return + ;; + --diff-algorithm=*) + __gitcomp "$__git_diff_algorithms" "" "${cur##--diff-algorithm=}" + return + ;; + --*) + __gitcomp "--pretty= --format= --abbrev-commit --oneline + $__git_diff_common_options + " + return + ;; + esac + __git_complete_revlist_file +} + +_git_show_branch () +{ + case "$cur" in + --*) + __gitcomp " + --all --remotes --topo-order --current --more= + --list --independent --merge-base --no-name + --color --no-color + --sha1-name --sparse --topics --reflog + " + return + ;; + esac + __git_complete_revlist +} + +_git_stash () +{ + local save_opts='--keep-index --no-keep-index --quiet --patch' + local subcommands='save list show apply clear drop pop create branch' + local subcommand="$(__git_find_on_cmdline "$subcommands")" + if [ -z "$subcommand" ]; then + case "$cur" in + --*) + __gitcomp "$save_opts" + ;; + *) + if [ -z "$(__git_find_on_cmdline "$save_opts")" ]; then + __gitcomp "$subcommands" + fi + ;; + esac + else + case "$subcommand,$cur" in + save,--*) + __gitcomp "$save_opts" + ;; + apply,--*|pop,--*) + __gitcomp "--index --quiet" + ;; + show,--*|drop,--*|branch,--*) + ;; + show,*|apply,*|drop,*|pop,*|branch,*) + __gitcomp_nl "$(git --git-dir="$(__gitdir)" stash list \ + | sed -n -e 's/:.*//p')" + ;; + *) + ;; + esac + fi +} + +_git_submodule () +{ + __git_has_doubledash && return + + local subcommands="add status init deinit update summary foreach sync" + if [ -z "$(__git_find_on_cmdline "$subcommands")" ]; then + case "$cur" in + --*) + __gitcomp "--quiet --cached" + ;; + *) + __gitcomp "$subcommands" + ;; + esac + return + fi +} + +_git_svn () +{ + local subcommands=" + init fetch clone rebase dcommit log find-rev + set-tree commit-diff info create-ignore propget + proplist show-ignore show-externals branch tag blame + migrate mkdirs reset gc + " + local subcommand="$(__git_find_on_cmdline "$subcommands")" + if [ -z "$subcommand" ]; then + __gitcomp "$subcommands" + else + 
local remote_opts="--username= --config-dir= --no-auth-cache" + local fc_opts=" + --follow-parent --authors-file= --repack= + --no-metadata --use-svm-props --use-svnsync-props + --log-window-size= --no-checkout --quiet + --repack-flags --use-log-author --localtime + --ignore-paths= --include-paths= $remote_opts + " + local init_opts=" + --template= --shared= --trunk= --tags= + --branches= --stdlayout --minimize-url + --no-metadata --use-svm-props --use-svnsync-props + --rewrite-root= --prefix= --use-log-author + --add-author-from $remote_opts + " + local cmt_opts=" + --edit --rmdir --find-copies-harder --copy-similarity= + " + + case "$subcommand,$cur" in + fetch,--*) + __gitcomp "--revision= --fetch-all $fc_opts" + ;; + clone,--*) + __gitcomp "--revision= $fc_opts $init_opts" + ;; + init,--*) + __gitcomp "$init_opts" + ;; + dcommit,--*) + __gitcomp " + --merge --strategy= --verbose --dry-run + --fetch-all --no-rebase --commit-url + --revision --interactive $cmt_opts $fc_opts + " + ;; + set-tree,--*) + __gitcomp "--stdin $cmt_opts $fc_opts" + ;; + create-ignore,--*|propget,--*|proplist,--*|show-ignore,--*|\ + show-externals,--*|mkdirs,--*) + __gitcomp "--revision=" + ;; + log,--*) + __gitcomp " + --limit= --revision= --verbose --incremental + --oneline --show-commit --non-recursive + --authors-file= --color + " + ;; + rebase,--*) + __gitcomp " + --merge --verbose --strategy= --local + --fetch-all --dry-run $fc_opts + " + ;; + commit-diff,--*) + __gitcomp "--message= --file= --revision= $cmt_opts" + ;; + info,--*) + __gitcomp "--url" + ;; + branch,--*) + __gitcomp "--dry-run --message --tag" + ;; + tag,--*) + __gitcomp "--dry-run --message" + ;; + blame,--*) + __gitcomp "--git-format" + ;; + migrate,--*) + __gitcomp " + --config-dir= --ignore-paths= --minimize + --no-auth-cache --username= + " + ;; + reset,--*) + __gitcomp "--revision= --parent" + ;; + *) + ;; + esac + fi +} + +_git_tag () +{ + local i c=1 f=0 + while [ $c -lt $cword ]; do + i="${words[c]}" + case "$i" in + -d|-v) + __gitcomp_nl "$(__git_tags)" + return + ;; + -f) + f=1 + ;; + esac + ((c++)) + done + + case "$prev" in + -m|-F) + ;; + -*|tag) + if [ $f = 1 ]; then + __gitcomp_nl "$(__git_tags)" + fi + ;; + *) + __gitcomp_nl "$(__git_refs)" + ;; + esac +} + +_git_whatchanged () +{ + _git_log +} + +__git_main () +{ + local i c=1 command __git_dir + + while [ $c -lt $cword ]; do + i="${words[c]}" + case "$i" in + --git-dir=*) __git_dir="${i#--git-dir=}" ;; + --git-dir) ((c++)) ; __git_dir="${words[c]}" ;; + --bare) __git_dir="." 
;; + --help) command="help"; break ;; + -c|--work-tree|--namespace) ((c++)) ;; + -*) ;; + *) command="$i"; break ;; + esac + ((c++)) + done + + if [ -z "$command" ]; then + case "$cur" in + --*) __gitcomp " + --paginate + --no-pager + --git-dir= + --bare + --version + --exec-path + --exec-path= + --html-path + --man-path + --info-path + --work-tree= + --namespace= + --no-replace-objects + --help + " + ;; + *) __git_compute_porcelain_commands + __gitcomp "$__git_porcelain_commands $(__git_aliases)" ;; + esac + return + fi + + local completion_func="_git_${command//-/_}" + declare -f $completion_func >/dev/null && $completion_func && return + + local expansion=$(__git_aliased_command "$command") + if [ -n "$expansion" ]; then + completion_func="_git_${expansion//-/_}" + declare -f $completion_func >/dev/null && $completion_func + fi +} + +__gitk_main () +{ + __git_has_doubledash && return + + local g="$(__gitdir)" + local merge="" + if [ -f "$g/MERGE_HEAD" ]; then + merge="--merge" + fi + case "$cur" in + --*) + __gitcomp " + $__git_log_common_options + $__git_log_gitk_options + $merge + " + return + ;; + esac + __git_complete_revlist +} + +if [[ -n ${ZSH_VERSION-} ]]; then + echo "WARNING: this script is deprecated, please see git-completion.zsh" 1>&2 + + autoload -U +X compinit && compinit + + __gitcomp () + { + emulate -L zsh + + local cur_="${3-$cur}" + + case "$cur_" in + --*=) + ;; + *) + local c IFS=$' \t\n' + local -a array + for c in ${=1}; do + c="$c${4-}" + case $c in + --*=*|*.) ;; + *) c="$c " ;; + esac + array[$#array+1]="$c" + done + compset -P '*[=:]' + compadd -Q -S '' -p "${2-}" -a -- array && _ret=0 + ;; + esac + } + + __gitcomp_nl () + { + emulate -L zsh + + local IFS=$'\n' + compset -P '*[=:]' + compadd -Q -S "${4- }" -p "${2-}" -- ${=1} && _ret=0 + } + + __gitcomp_file () + { + emulate -L zsh + + local IFS=$'\n' + compset -P '*[=:]' + compadd -Q -p "${2-}" -f -- ${=1} && _ret=0 + } + + _git () + { + local _ret=1 cur cword prev + cur=${words[CURRENT]} + prev=${words[CURRENT-1]} + let cword=CURRENT-1 + emulate ksh -c __${service}_main + let _ret && _default && _ret=0 + return _ret + } + + compdef _git git gitk + return +fi + +__git_func_wrap () +{ + local cur words cword prev + _get_comp_words_by_ref -n =: cur words cword prev + $1 +} + +# Setup completion for certain functions defined above by setting common +# variables and workarounds. +# This is NOT a public function; use at your own risk. +__git_complete () +{ + local wrapper="__git_wrap${2}" + eval "$wrapper () { __git_func_wrap $2 ; }" + complete -o bashdefault -o default -o nospace -F $wrapper $1 2>/dev/null \ + || complete -o default -o nospace -F $wrapper $1 +} + +# wrapper for backwards compatibility +_git () +{ + __git_wrap__git_main +} + +# wrapper for backwards compatibility +_gitk () +{ + __git_wrap__gitk_main +} + +__git_complete git __git_main +__git_complete gitk __gitk_main + +# The following are necessary only for Cygwin, and only are needed +# when the user has tab-completed the executable name and consequently +# included the '.exe' suffix. 
+# +if [ Cygwin = "$(uname -o 2>/dev/null)" ]; then +__git_complete git.exe __git_main +fi diff --git a/paddle/scripts/docker/root/.scripts/git-prompt.sh b/paddle/scripts/docker/root/.scripts/git-prompt.sh new file mode 100755 index 0000000000000000000000000000000000000000..576f4ec14c94a24ebffa9e2620acf881e6b5ddaa --- /dev/null +++ b/paddle/scripts/docker/root/.scripts/git-prompt.sh @@ -0,0 +1,445 @@ +# bash/zsh git prompt support +# +# Copyright (C) 2006,2007 Shawn O. Pearce +# Distributed under the GNU General Public License, version 2.0. +# +# This script allows you to see repository status in your prompt. +# +# To enable: +# +# 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh). +# 2) Add the following line to your .bashrc/.zshrc: +# source ~/.git-prompt.sh +# 3a) Change your PS1 to call __git_ps1 as +# command-substitution: +# Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ ' +# ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ ' +# the optional argument will be used as format string. +# 3b) Alternatively, for a slightly faster prompt, __git_ps1 can +# be used for PROMPT_COMMAND in Bash or for precmd() in Zsh +# with two parameters,
+#        <pre> and <post>, which are strings
+#        you would put in $PS1 before and after the status string
+#        generated by the git-prompt machinery.  e.g.
+#        Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
+#          will show username, at-sign, host, colon, cwd, then
+#          various status string, followed by dollar and SP, as
+#          your prompt.
+#        ZSH:  precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
+#          will show username, pipe, then various status string,
+#          followed by colon, cwd, dollar and SP, as your prompt.
+#        Optionally, you can supply a third argument with a printf
+#        format string to finetune the output of the branch status
+#
+# The repository status will be displayed only if you are currently in a
+# git repository. The %s token is the placeholder for the shown status.
+#
+# The prompt status always includes the current branch name.
+#
+# In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
+# unstaged (*) and staged (+) changes will be shown next to the branch
+# name.  You can configure this per-repository with the
+# bash.showDirtyState variable, which defaults to true once
+# GIT_PS1_SHOWDIRTYSTATE is enabled.
+#
+# You can also see if currently something is stashed, by setting
+# GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
+# then a '$' will be shown next to the branch name.
+#
+# If you would like to see if there're untracked files, then you can set
+# GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there're untracked
+# files, then a '%' will be shown next to the branch name.  You can
+# configure this per-repository with the bash.showUntrackedFiles
+# variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
+# enabled.
+#
+# If you would like to see the difference between HEAD and its upstream,
+# set GIT_PS1_SHOWUPSTREAM="auto".  A "<" indicates you are behind, ">"
+# indicates you are ahead, "<>" indicates you have diverged and "="
+# indicates that there is no difference. You can further control
+# behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
+# of values:
+#
+#     verbose       show number of commits ahead/behind (+/-) upstream
+#     legacy        don't use the '--count' option available in recent
+#                   versions of git-rev-list
+#     git           always compare HEAD to @{upstream}
+#     svn           always compare HEAD to your SVN upstream
+#
+# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
+# find one, or @{upstream} otherwise.  Once you have set
+# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
+# setting the bash.showUpstream config variable.
+#
+# If you would like to see more information about the identity of
+# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
+# to one of these values:
+#
+#     contains      relative to newer annotated tag (v1.6.3.2~35)
+#     branch        relative to newer tag or branch (master~4)
+#     describe      relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
+#     default       exactly matching tag
+#
+# If you would like a colored hint about the current dirty state, set
+# GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
+# the colored output of "git status -sb" and are available only when
+# using __git_ps1 for PROMPT_COMMAND or precmd.
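+#
+# As a minimal illustrative setup for ~/.bashrc combining the options
+# above (example values, not defaults):
+#
+#     source ~/.git-prompt.sh
+#     GIT_PS1_SHOWDIRTYSTATE=1
+#     GIT_PS1_SHOWSTASHSTATE=1
+#     GIT_PS1_SHOWUNTRACKEDFILES=1
+#     GIT_PS1_SHOWUPSTREAM="auto verbose"
+#     GIT_PS1_SHOWCOLORHINTS=1
+#     PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'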
+
+# stores the divergence from upstream in $p
+# used by GIT_PS1_SHOWUPSTREAM
+__git_ps1_show_upstream ()
+{
+  local key value
+  local svn_remote svn_url_pattern count n
+  local upstream=git legacy="" verbose=""
+
+  svn_remote=()
+  # get some config options from git-config
+  local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
+  while read -r key value; do
+    case "$key" in
+    bash.showupstream)
+      GIT_PS1_SHOWUPSTREAM="$value"
+      if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
+        p=""
+        return
+      fi
+      ;;
+    svn-remote.*.url)
+      svn_remote[$((${#svn_remote[@]} + 1))]="$value"
+      svn_url_pattern+="\\|$value"
+      upstream=svn+git # default upstream is SVN if available, else git
+      ;;
+    esac
+  done <<< "$output"
+
+  # parse configuration values
+  for option in ${GIT_PS1_SHOWUPSTREAM}; do
+    case "$option" in
+    git|svn) upstream="$option" ;;
+    verbose) verbose=1 ;;
+    legacy)  legacy=1  ;;
+    esac
+  done
+
+  # Find our upstream
+  case "$upstream" in
+  git)    upstream="@{upstream}" ;;
+  svn*)
+    # get the upstream from the "git-svn-id: ..." in a commit message
+    # (git-svn uses essentially the same procedure internally)
+    local -a svn_upstream
+    svn_upstream=($(git log --first-parent -1 \
+          --grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
+    if [[ 0 -ne ${#svn_upstream[@]} ]]; then
+      svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
+      svn_upstream=${svn_upstream%@*}
+      local n_stop="${#svn_remote[@]}"
+      for ((n=1; n <= n_stop; n++)); do
+        svn_upstream=${svn_upstream#${svn_remote[$n]}}
+      done
+
+      if [[ -z "$svn_upstream" ]]; then
+        # default branch name for checkouts with no layout:
+        upstream=${GIT_SVN_ID:-git-svn}
+      else
+        upstream=${svn_upstream#/}
+      fi
+    elif [[ "svn+git" = "$upstream" ]]; then
+      upstream="@{upstream}"
+    fi
+    ;;
+  esac
+
+  # Find how many commits we are ahead/behind our upstream
+  if [[ -z "$legacy" ]]; then
+    count="$(git rev-list --count --left-right \
+        "$upstream"...HEAD 2>/dev/null)"
+  else
+    # produce equivalent output to --count for older versions of git
+    local commits
+    if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
+    then
+      local commit behind=0 ahead=0
+      for commit in $commits
+      do
+        case "$commit" in
+        "<"*) ((behind++)) ;;
+        *)    ((ahead++))  ;;
+        esac
+      done
+      count="$behind  $ahead"
+    else
+      count=""
+    fi
+  fi
+
+  # calculate the result
+  if [[ -z "$verbose" ]]; then
+    case "$count" in
+    "") # no upstream
+      p="" ;;
+    "0  0") # equal to upstream
+      p="=" ;;
+    "0  "*) # ahead of upstream
+      p=">" ;;
+    *"  0") # behind upstream
+      p="<" ;;
+    *)      # diverged from upstream
+      p="<>" ;;
+    esac
+  else
+    case "$count" in
+    "") # no upstream
+      p="" ;;
+    "0  0") # equal to upstream
+      p=" u=" ;;
+    "0  "*) # ahead of upstream
+      p=" u+${count#0 }" ;;
+    *"  0") # behind upstream
+      p=" u-${count%  0}" ;;
+    *)      # diverged from upstream
+      p=" u+${count#* }-${count%  *}" ;;
+    esac
+  fi
+
+}
+
+# Helper function that is meant to be called from __git_ps1.  It
+# injects color codes into the appropriate gitstring variables used
+# to build a gitstring.
+__git_ps1_colorize_gitstring ()
+{
+  if [[ -n ${ZSH_VERSION-} ]]; then
+    local c_red='%F{red}'
+    local c_green='%F{green}'
+    local c_lblue='%F{blue}'
+    local c_clear='%f'
+  else
+    # Using \[ and \] around colors is necessary to prevent
+    # issues with command line editing/browsing/completion!
+    local c_red='\[\e[31m\]'
+    local c_green='\[\e[32m\]'
+    local c_lblue='\[\e[1;34m\]'
+    local c_clear='\[\e[0m\]'
+  fi
+  local bad_color=$c_red
+  local ok_color=$c_green
+  local flags_color="$c_lblue"
+
+  local branch_color=""
+  if [ $detached = no ]; then
+    branch_color="$ok_color"
+  else
+    branch_color="$bad_color"
+  fi
+  c="$branch_color$c"
+
+  z="$c_clear$z"
+  if [ "$w" = "*" ]; then
+    w="$bad_color$w"
+  fi
+  if [ -n "$i" ]; then
+    i="$ok_color$i"
+  fi
+  if [ -n "$s" ]; then
+    s="$flags_color$s"
+  fi
+  if [ -n "$u" ]; then
+    u="$bad_color$u"
+  fi
+  r="$c_clear$r"
+}
+
+# __git_ps1 accepts 0 or 1 arguments (i.e., format string)
+# when called from PS1 using command substitution
+# in this mode it prints text to add to bash PS1 prompt (includes branch name)
+#
+# __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
+# in that case it _sets_ PS1. The arguments are parts of a PS1 string.
+# when two arguments are given, the first is prepended and the second appended
+# to the state string when assigned to PS1.
+# The optional third parameter will be used as printf format string to further
+# customize the output of the git-status string.
+# In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
+__git_ps1 ()
+{
+  local pcmode=no
+  local detached=no
+  local ps1pc_start='\u@\h:\w '
+  local ps1pc_end='\$ '
+  local printf_format=' (%s)'
+
+  case "$#" in
+    2|3)  pcmode=yes
+      ps1pc_start="$1"
+      ps1pc_end="$2"
+      printf_format="${3:-$printf_format}"
+    ;;
+    0|1)  printf_format="${1:-$printf_format}"
+    ;;
+    *)  return
+    ;;
+  esac
+
+  local repo_info rev_parse_exit_code
+  repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
+    --is-bare-repository --is-inside-work-tree \
+    --short HEAD 2>/dev/null)"
+  rev_parse_exit_code="$?"
+
+  if [ -z "$repo_info" ]; then
+    if [ $pcmode = yes ]; then
+      #In PC mode PS1 always needs to be set
+      PS1="$ps1pc_start$ps1pc_end"
+    fi
+    return
+  fi
+
+  local short_sha
+  if [ "$rev_parse_exit_code" = "0" ]; then
+    short_sha="${repo_info##*$'\n'}"
+    repo_info="${repo_info%$'\n'*}"
+  fi
+  local inside_worktree="${repo_info##*$'\n'}"
+  repo_info="${repo_info%$'\n'*}"
+  local bare_repo="${repo_info##*$'\n'}"
+  repo_info="${repo_info%$'\n'*}"
+  local inside_gitdir="${repo_info##*$'\n'}"
+  local g="${repo_info%$'\n'*}"
+
+  local r=""
+  local b=""
+  local step=""
+  local total=""
+  if [ -d "$g/rebase-merge" ]; then
+    read b 2>/dev/null <"$g/rebase-merge/head-name"
+    read step 2>/dev/null <"$g/rebase-merge/msgnum"
+    read total 2>/dev/null <"$g/rebase-merge/end"
+    if [ -f "$g/rebase-merge/interactive" ]; then
+      r="|REBASE-i"
+    else
+      r="|REBASE-m"
+    fi
+  else
+    if [ -d "$g/rebase-apply" ]; then
+      read step 2>/dev/null <"$g/rebase-apply/next"
+      read total 2>/dev/null <"$g/rebase-apply/last"
+      if [ -f "$g/rebase-apply/rebasing" ]; then
+        read b 2>/dev/null <"$g/rebase-apply/head-name"
+        r="|REBASE"
+      elif [ -f "$g/rebase-apply/applying" ]; then
+        r="|AM"
+      else
+        r="|AM/REBASE"
+      fi
+    elif [ -f "$g/MERGE_HEAD" ]; then
+      r="|MERGING"
+    elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
+      r="|CHERRY-PICKING"
+    elif [ -f "$g/REVERT_HEAD" ]; then
+      r="|REVERTING"
+    elif [ -f "$g/BISECT_LOG" ]; then
+      r="|BISECTING"
+    fi
+
+    if [ -n "$b" ]; then
+      :
+    elif [ -h "$g/HEAD" ]; then
+      # symlink symbolic ref
+      b="$(git symbolic-ref HEAD 2>/dev/null)"
+    else
+      local head=""
+      if ! read head 2>/dev/null <"$g/HEAD"; then
+        if [ $pcmode = yes ]; then
+          PS1="$ps1pc_start$ps1pc_end"
+        fi
+        return
+      fi
+      # is it a symbolic ref?
+      b="${head#ref: }"
+      if [ "$head" = "$b" ]; then
+        detached=yes
+        b="$(
+        case "${GIT_PS1_DESCRIBE_STYLE-}" in
+        (contains)
+          git describe --contains HEAD ;;
+        (branch)
+          git describe --contains --all HEAD ;;
+        (describe)
+          git describe HEAD ;;
+        (* | default)
+          git describe --tags --exact-match HEAD ;;
+        esac 2>/dev/null)" ||
+
+        b="$short_sha..."
+        b="($b)"
+      fi
+    fi
+  fi
+
+  if [ -n "$step" ] && [ -n "$total" ]; then
+    r="$r $step/$total"
+  fi
+
+  local w=""
+  local i=""
+  local s=""
+  local u=""
+  local c=""
+  local p=""
+
+  if [ "true" = "$inside_gitdir" ]; then
+    if [ "true" = "$bare_repo" ]; then
+      c="BARE:"
+    else
+      b="GIT_DIR!"
+    fi
+  elif [ "true" = "$inside_worktree" ]; then
+    if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
+       [ "$(git config --bool bash.showDirtyState)" != "false" ]
+    then
+      git diff --no-ext-diff --quiet --exit-code || w="*"
+      if [ -n "$short_sha" ]; then
+        git diff-index --cached --quiet HEAD -- || i="+"
+      else
+        i="#"
+      fi
+    fi
+    if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
+       [ -r "$g/refs/stash" ]; then
+      s="$"
+    fi
+
+    if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
+       [ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
+       git ls-files --others --exclude-standard --error-unmatch -- '*' >/dev/null 2>/dev/null
+    then
+      u="%${ZSH_VERSION+%}"
+    fi
+
+    if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
+      __git_ps1_show_upstream
+    fi
+  fi
+
+  local z="${GIT_PS1_STATESEPARATOR-" "}"
+
+  # NO color option unless in PROMPT_COMMAND mode
+  if [ $pcmode = yes ] && [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
+    __git_ps1_colorize_gitstring
+  fi
+
+  local f="$w$i$s$u"
+  local gitstring="$c${b##refs/heads/}${f:+$z$f}$r$p"
+
+  if [ $pcmode = yes ]; then
+    if [[ -n ${ZSH_VERSION-} ]]; then
+      gitstring=$(printf -- "$printf_format" "$gitstring")
+    else
+      printf -v gitstring -- "$printf_format" "$gitstring"
+    fi
+    PS1="$ps1pc_start$gitstring$ps1pc_end"
+  else
+    printf -- "$printf_format" "$gitstring"
+  fi
+}
diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in
index f29d32f0d947dc7cde6112160e4f79ce8113505f..8fba4a19ba2cecd551aa4bbc764acc94615dd115 100644
--- a/paddle/scripts/submit_local.sh.in
+++ b/paddle/scripts/submit_local.sh.in
@@ -21,9 +21,7 @@ function version(){
         echo "    with_double: @WITH_DOUBLE@"
         echo "    with_python: @WITH_PYTHON@"
         echo "    with_rdma: @WITH_RDMA@"
-        echo "    with_metric_learning: @WITH_METRIC@"
         echo "    with_timer: @WITH_TIMER@"
-        echo "    with_predict_sdk: @WITH_PREDICT_SDK@"
 }
 
 function ver2num() {
@@ -94,16 +92,22 @@ else:
 EOF
 
 if [ $? -eq 1 ]; then  # Older version installed, or not installed at all
-   echo "First time run paddle, need to install some python dependencies."
-   BASEDIR=$(dirname "$0")
-   pip install ${BASEDIR}/../opt/paddle/share/wheels/*-@PADDLE_VERSION@-*.whl
-   if [ $? -ne 0 ]; then
-      echo "pip install wheels failed. "
-      echo "Please use 'sudo paddle' at the first time you use PaddlePaddle"
-      echo "PaddlePaddle will install some python dependencies automatically."
-      exit 1
-   fi
-   echo "Python dependencies are installed."
+    echo "First time run paddle, need to install some python dependencies."
+    # setuptools normalizes the package version, so we need to use the
+    # normalized version for the paddle python package
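+    # (e.g. PEP 440 normalization turns "0.10.0-rc1" into "0.10.0rc1",
+    # which is the form that appears in the wheel filename)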
+    PYTHON_PADDLE_VERSION=$(python -c 'import packaging
+import setuptools
+print str(packaging.version.Version("@PADDLE_VERSION@"))
+' 2>/dev/null)
+    BASEDIR=$(dirname "$0")
+    pip install ${BASEDIR}/../opt/paddle/share/wheels/*-${PYTHON_PADDLE_VERSION}-*.whl
+    if [ $? -ne 0 ]; then
+	echo "pip install wheels failed. "
+	echo "Please use 'sudo paddle' at the first time you use PaddlePaddle"
+	echo "PaddlePaddle will install some python dependencies automatically."
+	exit 1
+    fi
+    echo "Python dependencies are installed."
 fi
 
 case "$1" in
diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh
deleted file mode 100755
index 80f031a74e7052d183b5ef21d432476ff1cce722..0000000000000000000000000000000000000000
--- a/paddle/scripts/travis/before_install.osx.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-brew update
-brew tap homebrew/science
-brew install openblas swig md5sha1sum
diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh
index 5e6350b57458594163f23cca41a546d7bd9b1eda..f2cbc561652a3c7502de94be37d75783fc40b9c1 100755
--- a/paddle/scripts/travis/build_and_test.sh
+++ b/paddle/scripts/travis/build_and_test.sh
@@ -2,18 +2,11 @@
 source ./common.sh
 
 NPROC=1
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
-  export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages
-  export PYTHONHOME=/opt/python/2.7.12
-  export PATH=/opt/python/2.7.12/bin:${PATH}
-  cmake .. -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS}
-  NRPOC=`nproc`
-  make -j $NPROC
-  make coveralls
-  sudo make install
-elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
-  export PYTHONPATH=/usr/local/lib/python2.7/site-packages
-  cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS}
-  NPROC=`sysctl -n hw.ncpu`
-  make -j $NPROC
-fi
+export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages
+export PYTHONHOME=/opt/python/2.7.12
+export PATH=/opt/python/2.7.12/bin:${PATH}
+cmake .. -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DON_TRAVIS=ON -DWITH_COVERAGE=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS}
+NPROC=`nproc`
+make -j $NPROC
+make coveralls
+sudo make install
diff --git a/paddle/scripts/travis/docs.sh b/paddle/scripts/travis/docs.sh
index 6b43cad20b76e9abeb3cb10a726d3d8e3da5f8e2..53e998ef6c1b96d9e7d82b7effd12a27e6dc69f2 100755
--- a/paddle/scripts/travis/docs.sh
+++ b/paddle/scripts/travis/docs.sh
@@ -2,8 +2,12 @@
 
 # Add set -e, cd to directory.
 source ./common.sh
-
 # Compile Documentation only.
+cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_STYLE_CHECK=OFF ${EXTRA_CMAKE_OPTS}
+mkdir output
+make DESTDIR=./output install -j `nproc`
+pip install ./output/usr/local/opt/paddle/share/wheels/*
+rm -rf *
 cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=ON ${EXTRA_CMAKE_OPTS}
 make paddle_docs paddle_docs_cn
 
@@ -25,26 +29,41 @@ TARGET_BRANCH="gh-pages"
 # Only deploy master branch to build latest documentation.
 SOURCE_BRANCH="master"
 
-# If is not a Github pull request, and in master branch.
-if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH"  ]; then
-  exit 0
-fi
-
 # Clone the repo to output directory
 git clone $REPO output
 cd output
 
-# checkout github page branch
-git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
+function deploy_docs() {
+  SOURCE_BRANCH=$1
+  DIR=$2
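+  # $1: source branch to deploy from; $2: directory inside the
+  # gh-pages checkout to publish the generated docs into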
+  # If this is not a GitHub pull request, exit (nothing to deploy).
+  if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
+    exit 0
+  fi
+  # If this is not a watched branch, skip it.
+  if [ "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
+    return
+  fi
 
-# remove old docs. mv new docs.
-rm -rf doc doc_cn
-mv ../doc/cn/html doc_cn
-mv ../doc/en/html doc
+  # checkout github page branch
+  git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
+  
+  mkdir -p ${DIR}
+  # remove old docs. mv new docs.
+  set +e
+  rm -rf ${DIR}/doc ${DIR}/doc_cn
+  set -e
+  mv ../doc/cn/html ${DIR}/doc_cn
+  mv ../doc/en/html ${DIR}/doc
+  git add .
+}
+
+deploy_docs "master" "." 
+deploy_docs "develop" "./develop/"
 
 # Check is there anything changed.
 set +e
-git diff --exit-code >/dev/null
+git diff --cached --exit-code >/dev/null
 if [ $? -eq 0 ]; then
   echo "No changes to the output on this push; exiting."
   exit 0
@@ -57,7 +76,6 @@ if [ -n $SSL_KEY ]; then  # Only push updated docs for github.com/PaddlePaddle/P
   git config user.name "Travis CI"
   git config user.email "paddle-dev@baidu.com"
   git commit -m "Deploy to GitHub Pages: ${SHA}"
-
   # Set ssh private key
   openssl aes-256-cbc -K $SSL_KEY -iv $SSL_IV -in ../../paddle/scripts/travis/deploy_key.enc -out deploy_key -d
   chmod 600 deploy_key
diff --git a/paddle/setup.py.in b/paddle/setup.py.in
index 38621af065913c9edd44958e9fb767c983c00dbb..0b62436a7f81682d5279c3b307ac1abf09eafffb 100644
--- a/paddle/setup.py.in
+++ b/paddle/setup.py.in
@@ -12,67 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This file is used to build paddle python binding package.
-# It will be invoked by Makefile that generated by COMAKE
 
 from setuptools import setup, Extension
 
-import numpy as np
-import api.paddle_ld_flags
-import platform
-import os
-
-system = platform.system().lower()
-
-is_osx = (system == 'darwin')
-is_win = (system == 'windows')
-is_lin = (system == 'linux')
-
-
-# The extra links will passed from COMAKE
-#   because generate paddle LDFLAGS is too complicated to do in setup.py
-#   it just read COMAKE generated LDFLAGS.
-extra_comps = []
-extra_links = []
-obj = api.paddle_ld_flags.PaddleLDFlag()
-extra_comps = obj.c_flag()
-ldflags = obj.ldflag_str()
-if ldflags is not None:
-  extra_links.extend(ldflags.split(" "))
-
-try:
-  with open('.py_paddle_extra_link_flags', 'r') as f:
-    for line in f:
-      extra_links += line.split()
-except:
-  pass
-
-if is_lin == True:
-    extra_links = ["-Xlinker", '-start-group'] + extra_links + ["-Xlinker", "-end-group"]
-elif is_osx == True:
-    os.environ["ARCHFLAGS"] = "-arch x86_64"
-    extra_links = ["-Wl,-all_load"] + extra_links
-
-include_dirs = [np.get_include(), "../"]    # include numpy and paddle.
-
-os.environ["CC"] = "@CMAKE_C_COMPILER@"
-os.environ["CXX"] = "@CMAKE_CXX_COMPILER@"
-
 setup(name="py_paddle",
-  version="@PADDLE_VERSION@",
-  ext_modules=[
-    Extension('py_paddle._swig_paddle',      # Build SWIG Extension.
-       ['Paddle_wrap.cxx'],
-       language = "c++",
-       include_dirs = include_dirs,
-       extra_link_args = extra_links,
-       extra_compile_args = extra_comps
-    )
-  ],
-  packages=['py_paddle'],
-  include_dirs = include_dirs,
-  install_requires = [
-    'numpy>=1.8.0',      # The numpy is required.
-    'protobuf>=3.0.0'    # The paddle protobuf version
-  ],
+      version="${PADDLE_VERSION}",
+      packages=['py_paddle'],
+      include_package_data=True,
+      package_data={'py_paddle':['*.py','_swig_paddle.so']},
+      install_requires = [
+        'nltk>=3.2.2',
+        'numpy>=1.8.0',      # The numpy is required.
+        'protobuf>=${PROTOBUF_VERSION}'    # The paddle protobuf version
+      ],
+      url='http://www.paddlepaddle.org/',
+      license='Apache 2.0',
 )
diff --git a/paddle/trainer/Tester.cpp b/paddle/trainer/Tester.cpp
index 13aa28ae5d9699d267858d48e46797c756487ddd..80664fa877b324af73e3e3effa11e46eac6294e2 100644
--- a/paddle/trainer/Tester.cpp
+++ b/paddle/trainer/Tester.cpp
@@ -208,7 +208,7 @@ real Tester::forwardOneBatch(const DataBatch& dataBatch,
     return 0.0;  // In this case, there is no meaning to calculate cost
   }
 
-  return Argument::sumCosts(outArgs);
+  return Argument::sum(outArgs);
 }
 
 void Tester::testOnePassBatch(int passId) {
diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp
index bd84545375117b178d4324f0ad03f5bc35ae925d..b68e29cd5ea223272151e7a8b52d998832f47103 100644
--- a/paddle/trainer/Trainer.cpp
+++ b/paddle/trainer/Trainer.cpp
@@ -310,7 +310,7 @@ real Trainer::checkGradient() {
   std::vector<Argument> outArgs;
 
   trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-  real cost = Argument::sumCosts(outArgs);
+  real cost = Argument::sum(outArgs);
   LOG(INFO) << "original cost=" << cost;
   trainerInternal_.getGradientMachine()->backward();
 
@@ -340,7 +340,7 @@ real Trainer::checkGradient() {
     parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
     parameter->setValueUpdated();
     trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-    real newCost1 = Argument::sumCosts(outArgs);
+    real newCost1 = Argument::sum(outArgs);
 
     for (size_t i = 0; i < dim; ++i) {
       newp[i] = oldp[i] - step * d[i];
@@ -349,7 +349,7 @@ real Trainer::checkGradient() {
     parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara);
     parameter->setValueUpdated();
     trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC);
-    real newCost2 = Argument::sumCosts(outArgs);
+    real newCost2 = Argument::sum(outArgs);
 
     real trueDelta = 0.5 * (newCost1 - newCost2);
     real diff = (1e-20 + trueDelta) / (1e-20 + delta) - 1;
@@ -575,7 +575,7 @@ real Trainer::calcGradient(const DataBatch& dataBatch,
 
   trainerInternal_.getGradientMachine()->forwardBackward(
       inArgs, &outArgs, PASS_TRAIN);
-  real cost = Argument::sumCosts(outArgs);
+  real cost = Argument::sum(outArgs);
 
   offset = 0;
   for (auto& para : parameters) {
diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h
index c8ee4726c24c335ceda22ea3a20049b01d11c149..fac589d1d711affcd008f90edf87d865c8362f69 100644
--- a/paddle/trainer/Trainer.h
+++ b/paddle/trainer/Trainer.h
@@ -30,10 +30,6 @@ limitations under the License. */
 #include "TrainerConfigHelper.h"
 #include "TrainerInternal.h"
 
-#ifdef PADDLE_METRIC_LEARNING
-#include "paddle/internals/metric_learning/MetricTrainer.h"
-#endif
-
 DECLARE_int32(num_passes);
 
 namespace paddle {
@@ -201,12 +197,8 @@ protected:
   // parameter util
   std::unique_ptr<ParameterUtil> paramUtil_;
 
-#ifdef PADDLE_METRIC_LEARNING
-  MetricTrainer trainerInternal_;
-#else
   // trainer Internal
   TrainerInternal trainerInternal_;
-#endif
 };
 
 }  // namespace paddle
diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp
index f3b465b444167d4624a5e99c30e1257eda53ca2c..4c5d4a0913aaf3a9932b3d67806378ece4245304 100644
--- a/paddle/trainer/TrainerInternal.cpp
+++ b/paddle/trainer/TrainerInternal.cpp
@@ -134,7 +134,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId,
   real cost = 0;
   {
     REGISTER_TIMER("sumCost");
-    cost = Argument::sumCosts(*outArgs);
+    cost = Argument::sum(*outArgs);
   }
 
   if (batchId % intconfig_->log_period == 0) {
diff --git a/paddle/utils/.gitignore b/paddle/utils/.gitignore
index 956b606a18cae1bb11322accfa174ae5ce1580de..f2cfd7409412de68f4183daebcb48e7a3ae37672 100644
--- a/paddle/utils/.gitignore
+++ b/paddle/utils/.gitignore
@@ -1,2 +1 @@
 enable_virtualenv.c
-PythonUtil.cpp
diff --git a/paddle/utils/Any.h b/paddle/utils/Any.h
new file mode 100644
index 0000000000000000000000000000000000000000..99a0139accc4988f1e4cce45eeb688a6603c2c31
--- /dev/null
+++ b/paddle/utils/Any.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
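+
+// Illustrative usage (a sketch, not part of this header's interface):
+//   paddle::any v = 1;
+//   int x = paddle::any_cast<int>(v);  // throws bad_any_cast on mismatch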
+#if __cplusplus > 201402L
+#include <any>
+
+namespace paddle {
+// using std::any for C++ 17
+using std::any;
+using std::any_cast;
+using std::bad_any_cast;
+}  // namespace paddle
+
+#else
+#include <any.hpp>
+
+namespace paddle {
+// use linb::any for C++ 11
+using linb::any;
+using linb::any_cast;
+using linb::bad_any_cast;
+}  // namespace paddle
+#endif
diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt
index 10d906ee16656a808122b81d8b2fef55b8e7b7e9..171eae381af70e9e76210a77a409e56a527cab06 100644
--- a/paddle/utils/CMakeLists.txt
+++ b/paddle/utils/CMakeLists.txt
@@ -1,7 +1,4 @@
 # The utilities for paddle
-
-configure_file(PythonUtil.cpp.in ${PROJ_ROOT}/paddle/utils/PythonUtil.cpp)
-
 file(GLOB UTIL_HEADERS . *.h)
 file(GLOB UTIL_SOURCES . *.cpp)
 create_resources(enable_virtualenv.py enable_virtualenv.c)
diff --git a/paddle/utils/CpuId.cpp b/paddle/utils/CpuId.cpp
index 8eefdd2980e7f56a836df6fd2ff8c31b81a55555..edd33c454122d95078e0fde2a2e9d68903951ee8 100644
--- a/paddle/utils/CpuId.cpp
+++ b/paddle/utils/CpuId.cpp
@@ -19,7 +19,7 @@ limitations under the License. */
 /// for MSVC
 #define CPUID(info, x) __cpuidex(info, x, 0)
 
-#else
+#elif !defined(__ANDROID__)
 
 #include <cpuid.h>
 
@@ -31,6 +31,7 @@ limitations under the License. */
 namespace paddle {
 
 SIMDFlags::SIMDFlags() {
+#if !defined(__ANDROID__)
   unsigned int cpuInfo[4];
   // CPUID: https://en.wikipedia.org/wiki/CPUID
   // clang-format off
@@ -51,6 +52,9 @@ SIMDFlags::SIMDFlags() {
   CPUID(cpuInfo, 0x80000001);
   simd_flags_ |= cpuInfo[2] & (1 << 16) ? SIMD_FMA4  : SIMD_NONE;
   // clang-format on
+#else
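+  // cpuid is x86-only; Android (ARM) builds assume NEON support instead.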
+  simd_flags_ = SIMD_NEON;
+#endif
 }
 
 SIMDFlags const* SIMDFlags::instance() {
diff --git a/paddle/utils/CpuId.h b/paddle/utils/CpuId.h
index 0f3985cc7b2c018ede9bba9644d2d096561dccee..869be5be541dafd699a87a8e8893aadadf59b711 100644
--- a/paddle/utils/CpuId.h
+++ b/paddle/utils/CpuId.h
@@ -12,6 +12,7 @@ limitations under the License. */
 #pragma once
 
 #include "Common.h"
+#include "Error.h"
 
 namespace paddle {
 
@@ -29,6 +30,7 @@ enum simd_t {
   SIMD_AVX    = 1 << 8,     ///< AVX
   SIMD_AVX2   = 1 << 9,     ///< AVX 2
   SIMD_AVX512 = 1 << 10,    ///< AVX 512
+  SIMD_NEON   = 1 << 11,    ///< NEON
 };
 // clang-format on
 
@@ -95,6 +97,40 @@ private:
 #define HAS_AVX     HAS_SIMD(SIMD_AVX)
 #define HAS_AVX2    HAS_SIMD(SIMD_AVX2)
 #define HAS_AVX512  HAS_SIMD(SIMD_AVX512)
+#define HAS_NEON    HAS_SIMD(SIMD_NEON)
 // clang-format on
 
+/**
+ * Invoke checkCPUFeature() before Paddle initialization to check
+ * whether the target machine supports the instruction set this
+ * binary was compiled with; if not, an Error is returned.
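+ *
+ * e.g. checkCPUFeature().check();   // call site: initMain() in Util.cpp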
+ */
+inline Error __must_check checkCPUFeature() {
+  Error err;
+#ifndef __AVX__
+  if (HAS_AVX) {
+    LOG(WARNING) << "PaddlePaddle wasn't compiled to use avx instructions, "
+                 << "but these are available on your machine and could "
+                 << "speed up CPU computations via CMAKE .. -DWITH_AVX=ON";
+  }
+#else
+  if (!HAS_AVX) {
+    err = Error(
+        "PaddlePaddle was compiled to use avx instructions, "
+        "but these aren't available on your machine, please "
+        "disable it via CMAKE .. -DWITH_AVX=OFF");
+  }
+#endif  // __AVX__
+#ifdef __SSE3__
+  if (!HAS_SSE3) {
+    err = Error(
+        "PaddlePaddle was compiled to use sse3 instructions, "
+        "which is the minimum requirement of PaddlePaddle. "
+        "But these aren't available on your current machine.");
+  }
+#endif  // __SSE3__
+
+  return err;
+}
+
 }  // namespace paddle
diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp
index e8f31bc811ac30d83e8203b784ee1f93a8d35d90..320f671ed97dbadc4fa1b4b52d5611cf9239e7dd 100644
--- a/paddle/utils/Flags.cpp
+++ b/paddle/utils/Flags.cpp
@@ -30,7 +30,6 @@ DEFINE_bool(parallel_nn,
 DEFINE_int32(trainer_count, 1, "Defined how many trainers to train");
 DEFINE_int32(gpu_id, 0, "Which gpu core to use");
 DEFINE_int32(port, 20134, "Listening port for pserver");
-DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
 DEFINE_int32(ports_num,
              1,
              "Number of ports for sending dense parameter,"
diff --git a/paddle/utils/Flags.h b/paddle/utils/Flags.h
index 3e72f8356d883b353127ccae80f2881320d20b2b..dc4faef8331ed47b9ce3e952389b6469cd9fda2e 100644
--- a/paddle/utils/Flags.h
+++ b/paddle/utils/Flags.h
@@ -19,7 +19,6 @@ limitations under the License. */
 DECLARE_bool(parallel_nn);
 DECLARE_int32(async_count);
 DECLARE_int32(port);
-DECLARE_int32(data_server_port);
 DECLARE_bool(use_gpu);
 DECLARE_int32(gpu_id);
 DECLARE_int32(trainer_count);
diff --git a/paddle/utils/GlobalConstants.h b/paddle/utils/GlobalConstants.h
index 707346f2c76e59b50722f4f8805ebe56c3cf861b..0ec1c28dfbb2a7db9fa84c9eb2bc4dad806b78e9 100644
--- a/paddle/utils/GlobalConstants.h
+++ b/paddle/utils/GlobalConstants.h
@@ -23,11 +23,6 @@ enum PassType {
   PASS_TEST,    // Test pass
   PASS_GC,      // Gradient Check pass
   PASS_METRIC,  // pass for generate template output with no drop rate.
-  // pass for metric learning training with metric learning error, only used
-  // when we are doing KNN evaluation.
-  PASS_METRIC_TRAIN,
-  PASS_METRIC_TRAIN_WITH_NOERROR,  // Pass for metric learning training
-                                   // with no evaluation.
 };
 
 enum ParameterType {
diff --git a/paddle/utils/Logging.cpp b/paddle/utils/Logging.cpp
index 5a1c6ecb2219f7983609c27f3215c7fc1e9e9ef2..ea96bad240ad81c4c29b7dab35b015549052e2bb 100644
--- a/paddle/utils/Logging.cpp
+++ b/paddle/utils/Logging.cpp
@@ -18,6 +18,7 @@ limitations under the License. */
  */
 
 #include "Logging.h"
+#include 
 
 namespace paddle {
 
diff --git a/paddle/utils/PythonUtil.cpp.in b/paddle/utils/PythonUtil.cpp
similarity index 98%
rename from paddle/utils/PythonUtil.cpp.in
rename to paddle/utils/PythonUtil.cpp
index 66b5795e29fb9fa751ed802e87ced0a71aea4c51..7faeff55c28b9065179ad27b3b604a9f411249e5 100644
--- a/paddle/utils/PythonUtil.cpp.in
+++ b/paddle/utils/PythonUtil.cpp
@@ -195,10 +195,6 @@ extern const char enable_virtualenv_py[];
 }
 void initPython(int argc, char** argv) {
 #ifndef PADDLE_NO_PYTHON
-  char pyHome[] = "@PYTHON_INSTALL_DIR@"; // NOLINT
-  if (strlen(pyHome)) {
-    Py_SetPythonHome(pyHome);
-  }
   Py_SetProgramName(argv[0]);
   Py_Initialize();
   PySys_SetArgv(argc, argv);
diff --git a/paddle/utils/StringUtil.h b/paddle/utils/StringUtil.h
index 0b4f4c9113ae9d714b634b67931e51b408bbe777..95f071cb7de87d87f6988c136d7993c66fa9dde1 100644
--- a/paddle/utils/StringUtil.h
+++ b/paddle/utils/StringUtil.h
@@ -54,6 +54,25 @@ inline T toWithStatus(const std::string& s, bool* ok = nullptr) {
   return v;
 }
 
+/**
+ * Cast type T to string with status.
+ *
+ * @param [in] v input value of type T.
+ * @param [out] ok status; set to true if the cast succeeded. Pass
+ *              nullptr if the error status is not needed.
+ * @return result of the cast. If an error occurred, an empty string is
+ *              returned.
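+ *
+ * Illustrative use: bool ok; std::string s = toWithStatus(3.14, &ok);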
+ */
+template <class T>
+inline std::string toWithStatus(const T v, bool* ok = nullptr) {
+  std::ostringstream sout;
+  sout << v;
+  if (ok) {
+    *ok = !sout.fail();
+  }
+  return sout.str();
+}
+
 /// Convert string to type T. It makes sure all the characters in s are used.
 /// Otherwise it will abort.
 ///
@@ -67,6 +86,18 @@ inline T to(const std::string& s) {
   return v;
 }
 
+/// Convert type T to string.
+///
+/// @tparam T type of input value
+/// @param v input value of type T
+template <typename T>
+std::string to_string(T v) {
+  bool ok;
+  std::string s = toWithStatus(v, &ok);
+  CHECK(ok) << "Cannot convert v(" << v << ") to type std::string";
+  return s;
+}
+
 }  // namespace str
 
 #undef DEFINE_STRING_CONVERSION
diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp
index dbab4ec43ca2fa691445131d2cb14f51721a2e4c..b18b73e06a6c39c3bf9717280bc6323917c80efb 100644
--- a/paddle/utils/Util.cpp
+++ b/paddle/utils/Util.cpp
@@ -15,17 +15,23 @@ limitations under the License. */
 #include "Util.h"
 
 #include <dirent.h>
-#include <pmmintrin.h>
 #include <signal.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
 
 #include <fstream>
 #include <mutex>
 
 #include 
 
+#include "CpuId.h"
 #include "CustomStackTrace.h"
 #include "Logging.h"
 #include "StringUtil.h"
@@ -162,8 +168,12 @@ void initMain(int argc, char** argv) {
 
   installProfilerSwitch();
 
+#ifdef __SSE__
   _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+#endif
+#ifdef __SSE3__
   _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+#endif
 
   if (FLAGS_seed == 0) {
     unsigned int t = time(NULL);
@@ -185,6 +195,7 @@ void initMain(int argc, char** argv) {
   }
 
   version::printVersion();
+  checkCPUFeature().check();
   runInitFunctions();
 }
 
diff --git a/paddle/utils/arch/linux/Locks.cpp b/paddle/utils/arch/linux/Locks.cpp
index 2a6f96e04d024ac3977bc154dbeeb69ce9ab3a5d..310c9a6542563891d4ba5888e58406ea28d6a2ce 100644
--- a/paddle/utils/arch/linux/Locks.cpp
+++ b/paddle/utils/arch/linux/Locks.cpp
@@ -15,6 +15,7 @@ limitations under the License. */
 #include "paddle/utils/Locks.h"
 #include 
 #include 
+#include "paddle/utils/Logging.h"
 
 namespace paddle {
 class SemaphorePrivate {
@@ -26,7 +27,10 @@ Semaphore::Semaphore(int initValue) : m(new SemaphorePrivate()) {
   sem_init(&m->sem, 0, initValue);
 }
 
-Semaphore::~Semaphore() { sem_destroy(&m->sem); }
+Semaphore::~Semaphore() {
+  sem_destroy(&m->sem);
+  delete m;
+}
 
 bool Semaphore::timeWait(struct timespec* ts) {
   return (0 == sem_timedwait(&m->sem, ts));
@@ -36,36 +40,101 @@ void Semaphore::wait() { sem_wait(&m->sem); }
 
 void Semaphore::post() { sem_post(&m->sem); }
 
+#ifdef PADDLE_USE_PTHREAD_SPINLOCK
+
 class SpinLockPrivate {
 public:
   inline SpinLockPrivate() { pthread_spin_init(&lock_, 0); }
   inline ~SpinLockPrivate() { pthread_spin_destroy(&lock_); }
+
+  inline void lock() { pthread_spin_lock(&lock_); }
+  inline void unlock() { pthread_spin_unlock(&lock_); }
+
   pthread_spinlock_t lock_;
   char padding_[64 - sizeof(pthread_spinlock_t)];
 };
 
-SpinLock::SpinLock() : m(new SpinLockPrivate()) {}
+#else
 
-SpinLock::~SpinLock() { delete m; }
+#include 
+class SpinLockPrivate {
+public:
+  inline void lock() {
+    while (lock_.test_and_set(std::memory_order_acquire)) {
+    }
+  }
+  inline void unlock() { lock_.clear(std::memory_order_release); }
+
+  std::atomic_flag lock_ = ATOMIC_FLAG_INIT;
+  char padding_[64 - sizeof(lock_)];  // Padding to cache line size
+};
 
-void SpinLock::lock() { pthread_spin_lock(&m->lock_); }
+#endif
 
-void SpinLock::unlock() { pthread_spin_unlock(&m->lock_); }
+SpinLock::SpinLock() : m(new SpinLockPrivate()) {}
+SpinLock::~SpinLock() { delete m; }
+void SpinLock::lock() { m->lock(); }
+void SpinLock::unlock() { m->unlock(); }
+
+#ifdef PADDLE_USE_PTHREAD_BARRIER
 
 class ThreadBarrierPrivate {
 public:
   pthread_barrier_t barrier_;
+
+  inline explicit ThreadBarrierPrivate(int count) {
+    pthread_barrier_init(&barrier_, nullptr, count);
+  }
+
+  inline ~ThreadBarrierPrivate() { pthread_barrier_destroy(&barrier_); }
+
+  inline void wait() { pthread_barrier_wait(&barrier_); }
 };
 
-ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate()) {
-  pthread_barrier_init(&m->barrier_, nullptr, count);
-}
+#else
 
-ThreadBarrier::~ThreadBarrier() {
-  pthread_barrier_destroy(&m->barrier_);
-  delete m;
-}
+class ThreadBarrierPrivate {
+public:
+  pthread_mutex_t mutex_;
+  pthread_cond_t cond_;
+  int count_;
+  int tripCount_;
+
+  inline explicit ThreadBarrierPrivate(int cnt) : count_(0), tripCount_(cnt) {
+    CHECK_NE(cnt, 0);
+    CHECK_GE(pthread_mutex_init(&mutex_, 0), 0);
+    CHECK_GE(pthread_cond_init(&cond_, 0), 0);
+  }
+
+  inline ~ThreadBarrierPrivate() {
+    pthread_cond_destroy(&cond_);
+    pthread_mutex_destroy(&mutex_);
+  }
+
+  /**
+   * @brief wait
+   * @return true if this caller is the last waiter to arrive
+   */
+  inline bool wait() {
+    pthread_mutex_lock(&mutex_);
+    ++count_;
+    if (count_ >= tripCount_) {
+      count_ = 0;
+      pthread_cond_broadcast(&cond_);
+      pthread_mutex_unlock(&mutex_);
+      return true;
+    } else {
+      pthread_cond_wait(&cond_, &mutex_);
+      pthread_mutex_unlock(&mutex_);
+      return false;
+    }
+  }
+};
+
+#endif
 
-void ThreadBarrier::wait() { pthread_barrier_wait(&m->barrier_); }
+ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate(count)) {}
+ThreadBarrier::~ThreadBarrier() { delete m; }
+void ThreadBarrier::wait() { m->wait(); }
 
 }  // namespace paddle
diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/utils/tests/test_CustomStackTrace.cpp
index 378788bcecd579fff1c762702a8c27f54cee94bf..b5d9f93f1376048eabd726331006b0bb848bce11 100644
--- a/paddle/utils/tests/test_CustomStackTrace.cpp
+++ b/paddle/utils/tests/test_CustomStackTrace.cpp
@@ -19,6 +19,7 @@ limitations under the License. */
 
 #include "paddle/utils/CustomStackTrace.h"
 #include "paddle/utils/Locks.h"
+#include "paddle/utils/StringUtil.h"
 #include "paddle/utils/Util.h"
 
 DEFINE_int32(test_thread_num, 10, "testing thread number");
@@ -69,11 +70,11 @@ TEST(CustomStackTrace, normalTrain) {
     while (countDown-- > 0) {
       start.wait();
       for (size_t i = 0; i < layerSize; ++i) {
-        tracer.push("layer_" + std::to_string(i));
+        tracer.push("layer_" + paddle::str::to_string(i));
       }
       tracer.pop("");
       for (size_t i = 0; i < layerSize; ++i) {
-        tracer.pop("layer_" + std::to_string(layerSize - 1 - i));
+        tracer.pop("layer_" + paddle::str::to_string(layerSize - 1 - i));
       }
       finish.wait();
     }
@@ -89,7 +90,7 @@ TEST(CustomStackTrace, normalTest) {
     while (countDown-- > 0) {
       start.wait();
       for (size_t i = 0; i < layerSize; ++i) {
-        tracer.push("layer_" + std::to_string(i));
+        tracer.push("layer_" + paddle::str::to_string(i));
       }
       tracer.clear();  // in forward test, tracer will clear after forward.
       finish.wait();
diff --git a/paddle/utils/tests/test_CustomStackTracePrint.cpp b/paddle/utils/tests/test_CustomStackTracePrint.cpp
index 611b16aa7116d03ee51ba0095d043b78df1742ba..360c61c88a757da708b01d2bb54068b948b235cc 100644
--- a/paddle/utils/tests/test_CustomStackTracePrint.cpp
+++ b/paddle/utils/tests/test_CustomStackTracePrint.cpp
@@ -13,13 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/utils/CustomStackTrace.h"
+#include "paddle/utils/StringUtil.h"
 #include "paddle/utils/Util.h"
 
 int main(int argc, char** argv) {
   paddle::initMain(argc, argv);
 
   for (size_t i = 0; i < 1000; ++i) {
-    paddle::gLayerStackTrace.push("layer_" + std::to_string(i));
+    paddle::gLayerStackTrace.push("layer_" + paddle::str::to_string(i));
     if (i == 998) {
       throw "Unhandle exception";
     }
diff --git a/paddle/utils/tests/test_SIMDFlags.cpp b/paddle/utils/tests/test_SIMDFlags.cpp
index 8200a24ce7b7df75b48a89fbb7af15f304c5957f..185789c927be19385d6ddc7a1889b6cc56109d38 100644
--- a/paddle/utils/tests/test_SIMDFlags.cpp
+++ b/paddle/utils/tests/test_SIMDFlags.cpp
@@ -18,7 +18,8 @@ limitations under the License. */
 using namespace paddle;  // NOLINT
 
 TEST(SIMDFlags, gccTest) {
-#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__))
+#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__)) && \
+    !defined(__arm__)
   // clang-format off
   CHECK(!__builtin_cpu_supports("sse")    != HAS_SSE);
   CHECK(!__builtin_cpu_supports("sse2")   != HAS_SSE2);
@@ -43,4 +44,5 @@ TEST(SIMDFlags, normalPrint) {
   LOG(INFO) << "Has AVX:     " << std::boolalpha << HAS_AVX;
   LOG(INFO) << "Has AVX2:    " << std::boolalpha << HAS_AVX2;
   LOG(INFO) << "Has AVX512:  " << std::boolalpha << HAS_AVX512;
+  LOG(INFO) << "Has NEON:    " << std::boolalpha << HAS_NEON;
 }
diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto
index 65d5d50277b665e7c355202d6e8043f656ae92f1..4f9b53d6f6553e55406dd000029a598a92fd2fb6 100644
--- a/proto/ModelConfig.proto
+++ b/proto/ModelConfig.proto
@@ -441,6 +441,11 @@ message LayerConfig {
 
   // blank label used in ctc loss
   optional uint32 blank = 52 [default = 0];
+
+  // Stride parameter for the seqlastins layer, AverageLayer and MaxLayer,
+  // which controls the scope of the pooling operation. Can be set > 0.
+  // Leave it unset or set it to -1 to disable stride pooling.
+  optional int32 seq_pool_stride = 53 [default = -1];
 }
 
 message EvaluatorConfig {
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 71af50a9a4ed835635362c24d6c1ae5e92de050b..e7a0895533dd8902df9a012ab230df2a67256483 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -4,7 +4,7 @@ set(OUTPUT_DIR
 file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py)
 file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py)
 file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py)
-file(GLOB V2_PY_FILES . ./paddle/v2/*.py)
+file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py)
 
 set(PY_FILES paddle/__init__.py
              ${TRAINER_PY_FILES}
@@ -24,8 +24,9 @@ add_custom_target(paddle_python ALL DEPENDS
     ${OUTPUT_DIR}/.timestamp)
 
 add_subdirectory(paddle/trainer_config_helpers/tests)
-add_subdirectory(paddle/reader/tests)
 add_subdirectory(paddle/v2/tests)
+add_subdirectory(paddle/v2/reader/tests)
+add_subdirectory(paddle/v2/plot/tests)
 
 install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dist/
     DESTINATION opt/paddle/share/wheels
diff --git a/python/paddle/reader/tests/CMakeLists.txt b/python/paddle/reader/tests/CMakeLists.txt
deleted file mode 100644
index 502c897d8946a838847c1c23b1236358c58c088e..0000000000000000000000000000000000000000
--- a/python/paddle/reader/tests/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_test(NAME reader_decorator_test
-  COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
-        ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/reader/tests/decorator_test.py
-    WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py
index bd24c68b6fe88eab03c814f8cac70db3880316f4..0e752c117c1ecfab72e2da2f830380e9524236e7 100644
--- a/python/paddle/trainer/PyDataProvider2.py
+++ b/python/paddle/trainer/PyDataProvider2.py
@@ -45,6 +45,23 @@ class CacheType(object):
 
 
 class InputType(object):
+    """
+    InputType is the base class for paddle input types.
+
+    ..  note::
+
+        This is a base class and should never be used directly by users.
+
+    :param dim: dimension of the input. If the input is an integer, it means
+                the value range. Otherwise, it means the size of the layer.
+    :type dim: int
+    :param seq_type: sequence type of input. 0 means it is not a sequence. 1
+                     means it is a variable length sequence. 2 means it is a
+                     nested sequence.
+    :type seq_type: int
+    :param type: data type of input.
+    :type type: int
+    """
     __slots__ = ['dim', 'seq_type', 'type']
 
     def __init__(self, dim, seq_type, tp):
@@ -54,19 +71,63 @@ class InputType(object):
 
 
 def dense_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
+    """
+    Dense Vector. It means the input feature is a dense float vector. For
+    example, if the input is an image with 28*28 pixels, the corresponding
+    input of the Paddle neural network should be a dense vector with
+    dimension 784.
+
+    :param dim: dimension of this vector.
+    :type dim: int
+    :param seq_type: sequence type of input.
+    :type seq_type: int
+    :return: An input type object.
+    :rtype: InputType
+    """
     return InputType(dim, seq_type, DataType.Dense)
 
 
 def sparse_non_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
+    """
+    Sparse binary vector. It means the input feature is a sparse vector and
+    every element in this vector is either zero or one.
+
+    :param dim: dimension of this vector.
+    :type dim: int
+    :param seq_type: sequence type of this input.
+    :type seq_type: int
+    :return: An input type object.
+    :rtype: InputType
+    """
     return InputType(dim, seq_type, DataType.SparseNonValue)
 
 
 def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
+    """
+    Sparse vector. It means the input feature is a sparse vector. Most of the
+    elements in this vector are zero; the others can be any float value.
+
+    :param dim: dimension of this vector.
+    :type dim: int
+    :param seq_type: sequence type of this input.
+    :type seq_type: int
+    :return: An input type object.
+    :rtype: InputType
+    """
     return InputType(dim, seq_type, DataType.SparseValue)
 
 
-def index_slot(dim, seq_type=SequenceType.NO_SEQUENCE):
-    return InputType(dim, seq_type, DataType.Index)
+def index_slot(value_range, seq_type=SequenceType.NO_SEQUENCE):
+    """
+    Data type of an integer.
+
+    :param seq_type: sequence type of this input.
+    :type seq_type: int
+    :param value_range: range of this integer.
+    :type value_range: int
+    :return: An input type object
+    :rtype: InputType
+    """
+    return InputType(value_range, seq_type, DataType.Index)
 
 
 dense_vector = dense_slot
@@ -76,6 +137,14 @@ integer_value = index_slot
 
 
 def dense_vector_sequence(dim):
+    """
+    Data type of a sequence of dense vectors.
+
+    :param dim: dimension of dense vector.
+    :type dim: int
+    :return: An input type object
+    :rtype: InputType
+    """
     return dense_vector(dim, seq_type=SequenceType.SEQUENCE)
 
 
@@ -84,6 +153,15 @@ def dense_vector_sub_sequence(dim):
 
 
 def sparse_binary_vector_sequence(dim):
+    """
+    Data type of a sequence of sparse binary vectors, in which every element
+    is either zero or one.
+
+    :param dim: dimension of sparse vector.
+    :type dim: int
+    :return: An input type object
+    :rtype: InputType
+    """
     return sparse_binary_vector(dim, seq_type=SequenceType.SEQUENCE)
 
 
@@ -92,6 +170,15 @@ def sparse_binary_vector_sub_sequence(dim):
 
 
 def sparse_vector_sequence(dim):
+    """
+    Data type of a sequence of sparse vectors, in which most elements are zero
+    and the others can be any float value.
+
+    :param dim: dimension of sparse vector.
+    :type dim: int
+    :return: An input type object
+    :rtype: InputType
+    """
     return sparse_vector(dim, seq_type=SequenceType.SEQUENCE)
 
 
@@ -99,8 +186,14 @@ def sparse_vector_sub_sequence(dim):
     return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE)
 
 
-def integer_value_sequence(dim):
-    return integer_value(dim, seq_type=SequenceType.SEQUENCE)
+def integer_value_sequence(value_range):
+    """
+    Data type of a sequence of integers.
+
+    :param value_range: range of each element.
+    :type value_range: int
+    :return: An input type object
+    :rtype: InputType
+    """
+    return integer_value(value_range, seq_type=SequenceType.SEQUENCE)
 
 
 def integer_value_sub_sequence(dim):
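To make the input types above concrete, here is a minimal data-provider sketch. It only exercises the API documented in this hunk; the field names, dimensions, and the read_samples() helper are illustrative assumptions, not part of this patch.

    from paddle.trainer.PyDataProvider2 import (provider, dense_vector,
                                                integer_value)

    # A 784-dim dense image vector plus a class label in [0, 10). Note that
    # for integer_value the argument is the value range, not a layer size.
    @provider(input_types={
        'pixel': dense_vector(784),
        'label': integer_value(10),
    })
    def process(settings, filename):
        for img, lbl in read_samples(filename):  # read_samples is hypothetical
            yield {'pixel': img, 'label': lbl}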
diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index da937152ee0ce788309690c7b718943bb21b5a76..dc89419c40f8d527a3de0dc90ede0397f6f650c5 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -686,25 +686,17 @@ class ContextProjection(Projection):
 
 
 @config_class
-class ConvProjection(Projection):
-    type = 'conv'
-
+class ConvBaseProjection(Projection):
     def __init__(self,
                  input_layer_name,
                  num_filters=None,
                  conv_conf=None,
                  **xargs):
-        super(ConvProjection, self).__init__(input_layer_name, **xargs)
+        super(ConvBaseProjection, self).__init__(input_layer_name, **xargs)
 
         if num_filters is not None:
             self.proj_conf.num_filters = num_filters
 
-        parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
-                   num_filters)
-        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
-                                     self.proj_conf.conv_conf.output_y * \
-                                     num_filters
-
     def calc_output_size(self, input_layer_config):
         return self.proj_conf.output_size
 
@@ -723,6 +715,48 @@ class ConvProjection(Projection):
         return None
 
 
+@config_class
+class ConvProjection(ConvBaseProjection):
+    type = 'conv'
+
+    def __init__(self,
+                 input_layer_name,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvProjection, self).__init__(input_layer_name, num_filters,
+                                             conv_conf, **xargs)
+
+        parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf,
+                   num_filters)
+        self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
+                                     self.proj_conf.conv_conf.output_y * \
+                                     num_filters
+
+
+@config_class
+class ConvTransProjection(ConvBaseProjection):
+    type = 'convt'
+
+    def __init__(self,
+                 input_layer_name,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvTransProjection, self).__init__(input_layer_name, num_filters,
+                                                  conv_conf, **xargs)
+
+        parse_conv(
+            conv_conf,
+            self.input_layer_name,
+            self.proj_conf.conv_conf,
+            num_filters,
+            trans=True)
+        self.proj_conf.output_size = self.proj_conf.conv_conf.img_size_y * \
+                                     self.proj_conf.conv_conf.img_size * \
+                                     num_filters
+
+
 # Define a operator for mixed layer
 @config_class
 class Operator(Cfg):
@@ -789,6 +823,36 @@ class ConvOperator(Operator):
         return self.operator_conf.output_size
 
 
+@config_class
+class ConvTransOperator(Operator):
+    type = 'convt'
+
+    def __init__(self,
+                 input_layer_names,
+                 num_filters=None,
+                 conv_conf=None,
+                 **xargs):
+        super(ConvTransOperator, self).__init__(input_layer_names, **xargs)
+        if num_filters is not None:
+            self.operator_conf.num_filters = num_filters
+
+        parse_conv(
+            conv_conf,
+            MakeLayerNameInSubmodel(input_layer_names[0]),
+            self.operator_conf.conv_conf,
+            num_filters,
+            trans=True)
+        self.operator_conf.output_size = \
+            self.operator_conf.conv_conf.img_size * \
+            self.operator_conf.conv_conf.img_size_y * \
+            num_filters
+
+        config_assert(len(input_layer_names) == 2, "Conv is a binary operator")
+
+    def calc_output_size(self, input_sizes):
+        return self.operator_conf.output_size
+
+
 # please refer to the comments in proto/ModelConfig.proto
 @config_class
 class Conv(Cfg):
@@ -1156,9 +1220,11 @@ def parse_image(image, input_layer_name, image_conf):
 
 def parse_norm(norm, input_layer_name, norm_conf):
     norm_conf.norm_type = norm.norm_type
-    config_assert(norm.norm_type in ['rnorm', 'cmrnorm-projection'],
-                  "norm-type %s is not in [rnorm, 'cmrnorm-projection']" %
-                  norm.norm_type)
+    config_assert(
+        norm.norm_type in
+        ['rnorm', 'cmrnorm-projection', 'cross-channel-norm'],
+        "norm-type %s is not in [rnorm, cmrnorm-projection, cross-channel-norm]"
+        % norm.norm_type)
     norm_conf.channels = norm.channels
     norm_conf.size = norm.size
     norm_conf.scale = norm.scale
@@ -1772,8 +1838,17 @@ class ConvTransLayerBase(LayerBase):
         use_gpu = int(g_command_config_args.get("use_gpu", 0))
         parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
 
-        # cudnn_convt has not been implemented so use exconvt only
-        self.layer_type = "exconvt"
+        # Automatically select cudnn_convt for GPU and exconvt for CPU
+        # when type is set to exconvt, while still letting the user specify
+        # exconvt or cudnn_convt manually.
+        if self.layer_type == "cudnn_convt":
+            config_assert(use_gpu, "cudnn_convt only supports GPU")
+
+        if (use_gpu == 1 and self.layer_type != "exconvt" and
+            (parallel_nn == 0 or self.config.device > -1)):
+            self.layer_type = "cudnn_convt"
+        else:
+            self.layer_type = "exconvt"
         # need to specify layer in config
         self.config.type = self.layer_type
 
@@ -1790,10 +1865,9 @@ class ConvTransLayerBase(LayerBase):
                 trans=True)
             conv_conf = self.config.inputs[input_index].conv_conf
             psize = self.calc_parameter_size(conv_conf)
-            print("output size for %s is %d " % (name, conv_conf.output_x))
             self.create_input_parameter(input_index, psize)
-            self.set_layer_size(
-                (conv_conf.img_size**2) * self.config.num_filters)
+            self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
+                               self.config.num_filters)
 
         psize = self.config.size
         if shared_biases:
@@ -1810,6 +1884,11 @@ class ConvTransLayer(ConvTransLayerBase):
     layer_type = 'exconvt'
 
 
+@config_layer('cudnn_convt')
+class ConvTransLayer(ConvTransLayerBase):
+    layer_type = 'cudnn_convt'
+
+
 @config_layer('norm')
 class NormLayer(LayerBase):
     def __init__(self, name, inputs, **xargs):
@@ -1821,6 +1900,9 @@ class NormLayer(LayerBase):
                        norm_conf)
             self.set_cnn_layer(name, norm_conf.output_y, norm_conf.output_x,
                                norm_conf.channels, False)
+            if norm_conf.norm_type == "cross-channel-norm":
+                self.create_input_parameter(0, norm_conf.channels,
+                                            [norm_conf.channels, 1])
 
 
 @config_layer('pool')
@@ -2222,7 +2304,10 @@ def Link(
 
 # memory for recurrent layer group.
 # *name* and *size* are actual layer's name and size.
-# will return name of the memory,
+# If *name* is None, *memory_name* must be provided, and SetMemoryInput()
+# must be called later to specify the layer which this memory remembers.
+#
+# return the name of the memory,
 # use this name if you assign the memory as other layer's input
 #
 # boot frame of memory is zeroed by default,
@@ -2234,15 +2319,18 @@ def Link(
 # can only be initailized by a *boot_layer* which is a sequence.
 #
 @config_func
-def Memory(
-        name,
-        size,
-        is_sequence=False,
-        boot_layer=None,
-        boot_bias=False,
-        boot_bias_active_type="",
-        boot_with_const_id=None, ):
-    agent_name = name + "+delay1"
+def Memory(name,
+           size,
+           is_sequence=False,
+           boot_layer=None,
+           boot_bias=False,
+           boot_bias_active_type="",
+           boot_with_const_id=None,
+           memory_name=None):
+    if not memory_name:
+        config_assert(name is not None, "name cannot be None")
+        memory_name = name + "+delay1"
+    agent_name = memory_name
     if is_sequence:
         agent_layer = SequenceAgentLayer(agent_name, size)
     else:
@@ -2250,7 +2338,8 @@ def Memory(
     config_assert(g_current_submodel.is_recurrent_layer_group,
                   'Memory should be used in recurrent layer group only')
     memory = g_current_submodel.memories.add()
-    memory.layer_name = MakeLayerNameInSubmodel(name)
+    if name is not None:
+        memory.layer_name = MakeLayerNameInSubmodel(name)
     memory.link_name = MakeLayerNameInSubmodel(agent_name)
     memory.is_sequence = is_sequence
     options = sum((boot_layer is not None, bool(boot_bias),
@@ -2274,6 +2363,17 @@ def Memory(
     return agent_name
 
 
+@config_func
+def SetMemoryInput(memory_name, layer_name):
+    memory_name = MakeLayerNameInSubmodel(memory_name)
+    layer_name = MakeLayerNameInSubmodel(layer_name)
+    for mem in g_current_submodel.memories:
+        if mem.link_name == memory_name:
+            mem.layer_name = layer_name
+            return
+    logger.fatal("Nonexistent memory name: " + memory_name)
+
+
 # Generator for recurrent layer group, to use it:
 #  1. define a id layer as output of layer group
 #  2. define a memory of this id layer, and assign a boot id(begin of sequence)
@@ -2301,14 +2401,9 @@ def Generator(
 
 @config_layer('expand')
 class ExpandLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 trans_type='non-seq',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
         super(ExpandLayer, self).__init__(
-            name, 'expand', 0, inputs=inputs, device=device)
+            name, 'expand', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
         self.config.trans_type = trans_type
@@ -2339,11 +2434,10 @@ class MaxLayer(LayerBase):
                  inputs,
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
                  bias=False,
-                 output_max_index=None):
-        super(MaxLayer, self).__init__(
-            name, 'max', 0, inputs=inputs, device=device)
+                 output_max_index=None,
+                 **xargs):
+        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
         self.config.trans_type = trans_type
         self.config.active_type = active_type
@@ -2390,59 +2484,57 @@ class SequenceLastInstanceLayer(LayerBase):
                  inputs,
                  active_type='linear',
                  trans_type='non-seq',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 stride=-1,
+                 **xargs):
         super(SequenceLastInstanceLayer, self).__init__(
             name,
             'seqlastins',
             0,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input')
+        if trans_type == 'seq':
+            config_assert(stride == -1, 'subseq does not support stride window')
         self.config.trans_type = trans_type
-        for input_index in xrange(len(self.inputs)):
-            input_layer = self.get_input_layer(input_index)
-            self.set_layer_size(input_layer.size)
+        self.config.seq_pool_stride = stride
+        self.set_layer_size(self.get_input_layer(0).size)
         self.create_bias_parameter(bias, self.config.size)
 
 
 @config_layer('seqfirstins')
 class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
-    def __init__(
-            self,
-            name,
-            inputs,
-            active_type='linear',
-            trans_type='non-seq',
-            device=None,
-            bias=False, ):
+    def __init__(self,
+                 name,
+                 inputs,
+                 active_type='linear',
+                 trans_type='non-seq',
+                 bias=False,
+                 stride=-1,
+                 **xargs):
         super(SequenceFirstInstanceLayer, self).__init__(
             name,
             inputs=inputs,
             active_type=active_type,
-            device=device,
-            bias=bias)
-        self.config.trans_type = trans_type
+            trans_type=trans_type,
+            bias=bias,
+            stride=stride,
+            **xargs)
         self.config.select_first = True
 
 
 @config_layer('seqconcat')
 class SequenceConcatLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SequenceConcatLayer, self).__init__(
             name,
             'seqconcat',
             0,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs')
         for input_index in xrange(len(self.inputs)):
@@ -2458,15 +2550,15 @@ class SequenceReshapeLayer(LayerBase):
                  size,
                  inputs,
                  active_type='linear',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 **xargs):
         super(SequenceReshapeLayer, self).__init__(
             name,
             'seqreshape',
             size,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs')
         self.set_layer_size(size)
@@ -2475,19 +2567,9 @@ class SequenceReshapeLayer(LayerBase):
 
 @config_layer('subseq')
 class SubSequenceLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SubSequenceLayer, self).__init__(
-            name,
-            'subseq',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type)
+            name, 'subseq', 0, inputs=inputs, active_type=active_type, **xargs)
         config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
         input_layer0 = self.get_input_layer(0)
         size = input_layer0.size
@@ -2644,15 +2726,10 @@ class AverageLayer(LayerBase):
                  average_strategy='average',
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 **xargs):
         super(AverageLayer, self).__init__(
-            name,
-            'average',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type)
+            name, 'average', 0, inputs=inputs, active_type=active_type, **xargs)
         self.config.average_strategy = average_strategy
         self.config.trans_type = trans_type
         config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
@@ -2676,9 +2753,9 @@ class CosSimLayer(LayerBase):
 
 @config_layer('tensor')
 class TensorLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None, bias=True, **xargs):
+    def __init__(self, name, size, inputs, bias=True, **xargs):
         super(TensorLayer, self).__init__(
-            name, 'tensor', size, inputs=inputs, device=device, **xargs)
+            name, 'tensor', size, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
         config_assert(size > 0, 'size must be positive')
         config_assert(inputs[1].parameter_name == None,
@@ -3029,7 +3106,7 @@ class CRFLayer(LayerBase):
         super(CRFLayer, self).__init__(name, 'crf', size, inputs, device=device)
         config_assert(2 <= len(self.inputs) <= 3,
                       'CRFLayer must have 2 or 3 inputs')
-        self.create_input_parameter(0, size * (size + 2), [size, size + 2])
+        self.create_input_parameter(0, size * (size + 2), [size + 2, size])
         self.config.coeff = coeff
 
 
@@ -3051,7 +3128,7 @@ class CRFDecodingLayer(LayerBase):
         config_assert(
             len(self.inputs) <= 2,
             'CRFDecodingLayer cannot have more than 2 inputs')
-        self.create_input_parameter(0, size * (size + 2), [size, size + 2])
+        self.create_input_parameter(0, size * (size + 2), [size + 2, size])
 
 
 @config_layer('ctc')
diff --git a/python/paddle/trainer_config_helpers/default_decorators.py b/python/paddle/trainer_config_helpers/default_decorators.py
index ad3efcbf369411b9c42b2a32ed05b04f86bf7de6..69d860d9dab9c1d90e4d6a6940d66fcb551f6eb6 100644
--- a/python/paddle/trainer_config_helpers/default_decorators.py
+++ b/python/paddle/trainer_config_helpers/default_decorators.py
@@ -52,6 +52,10 @@ def wrap_param_default(param_names=None,
                     kwargs[name] = default_factory(func)
             return func(*args, **kwargs)
 
+        if hasattr(func, 'argspec'):
+            __wrapper__.argspec = func.argspec
+        else:
+            __wrapper__.argspec = inspect.getargspec(func)
         return __wrapper__
 
     return __impl__
@@ -93,13 +97,13 @@ def reset_hook():
 register_parse_config_hook(reset_hook)
 
 
-def wrap_name_default(name_prefix=None):
+def wrap_name_default(name_prefix=None, name_param="name"):
     """
     Decorator to set "name" arguments default to "{name_prefix}_{invoke_count}".
 
     ..  code:: python
 
-        @default_name("some_name")
+        @wrap_name_default("some_name")
         def func(name=None):
             print name      # name will never be None. If name is not set,
                             # name will be "some_name_%d"
@@ -111,7 +115,7 @@ def wrap_name_default(name_prefix=None):
     """
     factory = DefaultNameFactory(name_prefix)
     _name_factories.append(factory)
-    return wrap_param_default(["name"], factory)
+    return wrap_param_default([name_param], factory)
 
 
 def wrap_param_attr_default(param_names=None, default_factory=None):
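The new name_param argument lets the same factory fill a keyword other than the literal "name". A small sketch of the intended behavior, applied to a hypothetical function:

    @wrap_name_default("memory", "memory_name")
    def make_memory(memory_name=None, size=0):
        return memory_name

    # memory_name is auto-filled as "memory_0", "memory_1", ... per parse.
    assert make_memory(size=256).startswith("memory_")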
diff --git a/python/paddle/trainer_config_helpers/layer_math.py b/python/paddle/trainer_config_helpers/layer_math.py
index 2d9e36f2b0d379d907634208a45c69efa9dbba3d..544b443825393c9a31c0375724d4ca63dac5c5eb 100644
--- a/python/paddle/trainer_config_helpers/layer_math.py
+++ b/python/paddle/trainer_config_helpers/layer_math.py
@@ -39,6 +39,7 @@ register_unary_math_op('abs', act.AbsActivation())
 register_unary_math_op('sigmoid', act.SigmoidActivation())
 register_unary_math_op('tanh', act.TanhActivation())
 register_unary_math_op('square', act.SquareActivation())
+register_unary_math_op('relu', act.ReluActivation())
 
 
 def add(layeroutput, other):
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index 00aef80691fba05be543beadf22acde7d28c5e8e..97db3c2d4c018355aaaa1d517cc65db3aefad74a 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -14,10 +14,11 @@
 
 import functools
 import collections
+import inspect
 
 from paddle.trainer.config_parser import *
 from .activations import LinearActivation, SigmoidActivation, TanhActivation, \
-    ReluActivation, IdentityActivation, SoftmaxActivation
+    ReluActivation, IdentityActivation, SoftmaxActivation, BaseActivation
 from .evaluators import *
 from .poolings import MaxPooling, AvgPooling, BasePoolingType
 from .attrs import *
@@ -51,6 +52,7 @@ __all__ = [
     "cos_sim",
     "hsigmoid",
     "conv_projection",
+    "mse_cost",
     "regression_cost",
     'classification_cost',
     "LayerOutput",
@@ -110,8 +112,11 @@ __all__ = [
     'out_prod_layer',
     'print_layer',
     'priorbox_layer',
+    'cross_channel_norm_layer',
     'spp_layer',
     'pad_layer',
+    'eos_layer',
+    'layer_support',
 ]
 
 
@@ -284,6 +289,14 @@ class LayerOutput(object):
         """
         assert False, "this method should not be invoked"
 
+    def set_input(self, input):
+        """
+        Set the input for a memory layer. Can only be used for a memory layer.
+        """
+        assert isinstance(input, LayerOutput)
+        assert self.layer_type == LayerType.MEMORY
+        SetMemoryInput(self.name, input.name)
+
 
 ERROR_CLIPPING = 'error_clipping_threshold'
 DROPOUT = 'drop_rate'
@@ -314,6 +327,11 @@ def layer_support(*attrs):
                     val.check(method.__name__)
             return method(*args, **kwargs)
 
+        if hasattr(method, 'argspec'):
+            wrapper.argspec = method.argspec
+        else:
+            wrapper.argspec = inspect.getargspec(method)
+
         return wrapper
 
     return decorator
@@ -695,8 +713,9 @@ class MixedLayerType(LayerOutput):
         assert len(self.inputs) == 0
         return self
 
-    def __exit__(self, *args, **kwargs):
-        del args, kwargs  # unused parameter to suppress warning
+    def __exit__(self, exc_type, exc_value, tb):
+        if exc_value is not None:
+            raise exc_value
         assert len(self.inputs) != 0
         ml = MixedLayer(
             name=self.name,
@@ -708,6 +727,7 @@ class MixedLayerType(LayerOutput):
         # update the size which might be computed inside MixedLayer
         # according to the operator's output size
         self.size = ml.config.size
+        self.finalized = True
 
 
 @wrap_name_default("mixed")
@@ -786,17 +806,16 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
 
     ..  code-block:: python
 
-        data = data_layer(name="input",
-                          size=1000)
+        data = data_layer(name="input", size=1000)
 
     :param name: Name of this data layer.
     :type name: basestring
     :param size: Size of this data layer.
     :type size: int
     :param height: Height of this data layer, used for image
-    :type size: int|None
+    :type height: int|None
     :param width: Width of this data layer, used for image
-    :type size: int|None
+    :type width: int|None
     :param layer_attr: Extra Layer Attribute.
     :type layer_attr: ExtraLayerAttribute.
     :return: LayerOutput object.
@@ -990,6 +1009,46 @@ def priorbox_layer(input,
         size=size)
 
 
+@wrap_name_default("cross_channel_norm")
+def cross_channel_norm_layer(input, name=None, param_attr=None):
+    """
+    Normalize a layer's output. This layer is necessary for SSD. It applies a
+    normalization across the channels of each sample in a conv layer's output
+    and scales the result by a group of trainable factors whose dimension
+    equals the number of channels.
+
+    :param name: The Layer Name.
+    :type name: basestring
+    :param input: The input layer.
+    :type input: LayerOutput
+    :param param_attr: The Parameter Attribute|list.
+    :type param_attr: ParameterAttribute
+    :return: LayerOutput
+    """
+    assert input.num_filters is not None
+    Layer(
+        name=name,
+        type=LayerType.NORM_LAYER,
+        inputs=[
+            Input(
+                input.name,
+                norm=Norm(
+                    norm_type="cross-channel-norm",
+                    channels=input.num_filters,
+                    size=input.size,
+                    scale=0,
+                    pow=0,
+                    blocked=0),
+                **param_attr.attr)
+        ])
+    return LayerOutput(
+        name,
+        LayerType.NORM_LAYER,
+        parents=input,
+        num_filters=input.num_filters,
+        size=input.size)
+
+
 @wrap_name_default("seq_pooling")
 @wrap_bias_attr_default(has_bias=False)
 @wrap_param_default(['pooling_type'], default_factory=lambda _: MaxPooling())
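A usage sketch for the new cross_channel_norm_layer. The preceding conv layer, its filter count, and the initial scale of 20 (a common SSD choice) are illustrative assumptions:

    conv = img_conv_layer(input=data, filter_size=3, num_filters=512,
                          stride=1, padding=1, act=ReluActivation())
    norm = cross_channel_norm_layer(
        input=conv,
        param_attr=ParamAttr(initial_mean=20.0, initial_std=0.0))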
@@ -1283,15 +1342,29 @@ def grumemory(input,
 def last_seq(input,
              name=None,
              agg_level=AggregateLevel.EACH_TIMESTEP,
+             stride=-1,
              layer_attr=None):
     """
     Get Last Timestamp Activation of a sequence.
 
+    If stride > 0, this layer slides a window whose size is determined by stride
+    and returns the last value of the window as the output. Thus, a long sequence
+    will be shortened. Note that for sequences with sub-sequences, stride must
+    keep its default value of -1.
+
+    The simple usage is:
+
+    .. code-block:: python
+
+       seq = last_seq(input=layer)
+
     :param agg_level: Aggregated level
     :param name: Layer name.
     :type name: basestring
     :param input: Input layer name.
     :type input: LayerOutput
+    :param stride: window size; -1 disables windowed pooling.
+    :type stride: int
     :param layer_attr: extra layer attributes.
     :type layer_attr: ExtraLayerAttribute.
     :return: LayerOutput object.
@@ -1303,11 +1376,15 @@ def last_seq(input,
                        " series information at all. Maybe you want to use"
                        " first_seq instead.")
 
+    if agg_level == AggregateLevel.EACH_SEQUENCE:
+        assert stride == -1
+
     Layer(
         name=name,
         type=LayerType.SEQUENCE_LAST_INSTANCE,
         inputs=[input.name],
         trans_type=agg_level,
+        stride=stride,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(
         name,
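A sketch of the new windowed pooling: with stride=5, last_seq emits the last timestep of each consecutive 5-step window, shortening the sequence by a factor of 5. The embedding layer and its input are illustrative:

    emb = embedding_layer(input=word_ids, size=128)  # word_ids: a data layer
    pooled = last_seq(input=emb, stride=5)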
@@ -1321,15 +1398,29 @@ def last_seq(input,
 def first_seq(input,
               name=None,
               agg_level=AggregateLevel.EACH_TIMESTEP,
+              stride=-1,
               layer_attr=None):
     """
     Get First Timestamp Activation of a sequence.
 
+    If stride > 0, this layer slides a window whose size is determined by stride
+    and returns the first value of the window as the output. Thus, a long sequence
+    will be shortened. Note that for sequences with sub-sequences, stride must
+    keep its default value of -1.
+
+    The simple usage is:
+
+    .. code-block:: python
+
+       seq = first_seq(input=layer)
+
     :param agg_level: aggregation level
     :param name: Layer name.
     :type name: basestring
     :param input: Input layer name.
     :type input: LayerOutput
+    :param stride: window size; -1 disables windowed pooling.
+    :type stride: int
     :param layer_attr: extra layer attributes.
     :type layer_attr: ExtraLayerAttribute.
     :return: LayerOutput object.
@@ -1342,11 +1433,15 @@ def first_seq(input,
                        ' time series information at all. Maybe you want to use'
                        ' last_seq instead.')
 
+    if agg_level == AggregateLevel.EACH_SEQUENCE:
+        assert stride == -1
+
     Layer(
         name=name,
         type=LayerType.SEQUENCE_FIRST_INSTANCE,
         inputs=[input.name],
         trans_type=agg_level,
+        stride=stride,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(
         name,
@@ -1425,7 +1520,7 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
 
     .. code-block:: python
 
-       expand = repeat_layer(layer, 4)
+       expand = repeat_layer(input=layer, num_repeats=4)
 
     :param input: Input layer
     :type input: LayerOutput
@@ -1797,6 +1892,12 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
     Note that the above computation is for one sample. Multiple samples are
     processed in one batch.
 
+    The example usage is:
+
+    .. code-block:: python
+
+       cos = cos_sim(a=layer1, b=layer2, size=3)
+
     :param name: layer name
     :type name: basestring
     :param a: input layer a
@@ -1839,7 +1940,7 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
 @layer_support()
 def hsigmoid(input,
              label,
-             num_classes,
+             num_classes=None,
              name=None,
              bias_attr=None,
              param_attr=None,
@@ -1855,8 +1956,7 @@ def hsigmoid(input,
     ..  code-block:: python
 
         cost = hsigmoid(input=[layer1, layer2],
-                        label=data_layer,
-                        num_classes=3)
+                        label=data_layer)
 
     :param input: Input layers. It could be a LayerOutput or list/tuple of
                  LayerOutput.
@@ -1864,12 +1964,14 @@ def hsigmoid(input,
     :param label: Label layer.
     :type label: LayerOutput
     :param num_classes: number of classes.
-    :type num_classes: int
+    :type num_classes: int|None
     :param name: layer name
     :type name: basestring
     :param bias_attr: Bias attribute. None means default bias.
                       False means no bias.
     :type bias_attr: ParameterAttribute|False
+    :param param_attr: Parameter Attribute. None means default parameter.
+    :type param_attr: ParameterAttribute|None
     :param layer_attr: Extra Layer Attribute.
     :type layer_attr: ExtraLayerAttribute
     :return: LayerOutput object.
@@ -1889,6 +1991,11 @@ def hsigmoid(input,
     assert isinstance(label, LayerOutput)
     assert label.layer_type == LayerType.DATA
 
+    if num_classes is None:
+        num_classes = label.size
+    if num_classes is None or num_classes <= 2:
+        raise ValueError("hsigmoid label size must be larger than 2.")
+
     ipts_for_layer = []
     parents = []
     for each_input, each_param_attr in zip(input, param_attr):
@@ -1958,6 +2065,16 @@ def img_conv_layer(input,
     pieces. The first 256/4 = 64 channels will be processed by the first 32
     filters. The remaining channels will be processed by the remaining filters.
 
+    The example usage is:
+
+    ..  code-block:: python
+
+        conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1,
+                              num_channels=8,
+                              num_filters=16, stride=1,
+                              bias_attr=False,
+                              act=ReluActivation())
+
     :param name: Layer name.
     :type name: basestring
     :param input: Layer Input.
@@ -1999,8 +2116,9 @@ def img_conv_layer(input,
     :param trans: true if it is a convTransLayer, false if it is a convLayer
     :type trans: bool
     :param layer_type: specify the layer_type, default is None. If trans=True,
-                       layer_type has to be "exconvt", otherwise layer_type
-                       has to be either "exconv" or "cudnn_conv"
+                       layer_type has to be "exconvt" or "cudnn_convt", 
+                       otherwise layer_type has to be either "exconv" or 
+                       "cudnn_conv"
     :type layer_type: String
     :return: LayerOutput object.
     :rtype: LayerOutput
@@ -2040,7 +2158,7 @@ def img_conv_layer(input,
 
     if layer_type:
         if trans:
-            assert layer_type in ["exconvt"]
+            assert layer_type in ["exconvt", "cudnn_convt"]
         else:
             assert layer_type in ["exconv", "cudnn_conv"]
         lt = layer_type
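With "cudnn_convt" now accepted, a transposed convolution can request the cuDNN kernel explicitly. A sketch with illustrative shapes; use_gpu must be enabled:

    convt = img_conv_layer(input=feat, filter_size=4, num_filters=64,
                           num_channels=128, stride=2, padding=1,
                           act=ReluActivation(), trans=True,
                           layer_type="cudnn_convt")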
@@ -2097,6 +2215,34 @@ def img_pool_layer(input,
 
     .. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
 
+    - ceil_mode=True:
+
+    ..  math::
+
+        w = 1 + int(ceil((input\_width + 2 * padding - pool\_size) / float(stride)))
+        h = 1 + int(ceil((input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)))
+
+    - ceil_mode=False:
+
+    ..  math::
+
+        w = 1 + int(floor((input\_width + 2 * padding - pool\_size) / float(stride)))
+        h = 1 + int(floor((input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)))
+
+    The example usage is:
+
+    ..  code-block:: python
+
+        maxpool = img_pool_layer(input=conv,
+                                 pool_size=3,
+                                 pool_size_y=5,
+                                 num_channels=8,
+                                 stride=1,
+                                 stride_y=2,
+                                 padding=1,
+                                 padding_y=2,
+                                 pool_type=MaxPooling())
+
     :param padding: pooling padding width.
     :type padding: int
     :param padding_y: pooling padding height. It's equal to padding by default.
@@ -2123,19 +2269,6 @@ def img_pool_layer(input,
     :param ceil_mode: Whether to use ceil mode to calculate the output height
                       and width. Default is True. If set to False, floor mode
                       is used.
 
-                      - ceil_mode=True:
-
-                      ..  math::
-
-                          w = 1 + int(ceil(input_width + 2 * padding - pool_size) / float(stride))
-                          h = 1 + int(ceil(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
-
-                      - ceil_mode=False:
-
-                      ..  math::
-
-                          w = 1 + int(floor(input_width + 2 * padding - pool_size) / float(stride))
-                          h = 1 + int(floor(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
     :type ceil_mode: bool
     :return: LayerOutput object.
     :rtype: LayerOutput
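Reading the two formulas as the ceiling or floor of the quotient, a quick numeric check with assumed values input_width=7, pool_size=2, padding=0, stride=2 shows where the two modes differ:

    import math
    iw, ps, pad, s = 7, 2, 0, 2
    w_ceil = 1 + int(math.ceil((iw + 2 * pad - ps) / float(s)))   # 1 + 3 = 4
    w_floor = 1 + int(math.floor((iw + 2 * pad - ps) / float(s)))  # 1 + 2 = 3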
@@ -2150,8 +2283,9 @@ def img_pool_layer(input,
         pool_type.name = 'avg'
 
     type_name = pool_type.name + '-projection' \
-      if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)) \
-      else pool_type.name
+        if (isinstance(pool_type, AvgPooling) or
+            isinstance(pool_type, MaxPooling)) \
+        else pool_type.name
 
     pool_size_y = pool_size if pool_size_y is None else pool_size_y
     stride_y = stride if stride_y is None else stride_y
@@ -2197,6 +2331,15 @@ def spp_layer(input,
     For details, please refer to
     `Kaiming He's paper `_.
 
+    The example usage is:
+
+    ..  code-block:: python
+
+        spp = spp_layer(input=data, 
+                        pyramid_height=2, 
+                        num_channels=16, 
+                        pool_type=MaxPooling())
+
     :param name: layer name.
     :type name: basestring
     :param input: layer's input.
@@ -2285,6 +2428,12 @@ def img_cmrnorm_layer(input,
     For details, please refer to
     `Alex's paper `_.
 
+    The example usage is:
+
+    ..  code-block:: python
+    
+        norm = img_cmrnorm_layer(input=net, size=5)
+
     :param name: layer name.
     :type name: None|basestring
     :param input: layer's input.
@@ -2340,6 +2489,12 @@ def batch_norm_layer(input,
     For details of batch normalization, please refer to this
     `paper `_.
 
+    The example usage is:
+
+    ..  code-block:: python
+    
+        norm = batch_norm_layer(input=net, act=ReluActivation())
+
     :param name: layer name.
     :type name: basestring
     :param input: batch normalization input. Better be linear activation.
@@ -2686,8 +2841,10 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
         size=a.size)
 
 
+@wrap_name_default("memory", "memory_name")
 def memory(name,
            size,
+           memory_name=None,
            is_seq=False,
            boot_layer=None,
            boot_bias=None,
@@ -2709,14 +2866,32 @@ def memory(name,
     If boot_layer is not null, the memory is just the boot_layer's output.
     Set :code:`is_seq` to true if the boot layer is a sequence.
 
-
     The layer with the same name in the recurrent group supplies this memory's
     value at each time step.
 
-    :param name: memory's name.
+    .. code-block:: python
+
+       mem = memory(size=256, name='state')
+       state = fc_layer(input=mem, size=256, name='state')
+
+    If you do not want to specify the name, you can equivalently use set_input()
+    to specify the layer to be remembered, as follows:
+
+    .. code-block:: python
+
+       mem = memory(size=256)
+       state = fc_layer(input=mem, size=256)
+       mem.set_input(state)
+
+
+    :param name: the name of the layer which this memory remembers.
+                 If name is None, the user should call set_input() to specify
+                 the layer which this memory remembers.
     :type name: basestring
     :param size: size of memory.
     :type size: int
+    :param memory_name: the name of the memory.
+                        It is ignored when name is provided.
+    :type memory_name: basestring
     :param is_seq: whether the boot layer is a sequence
     :type is_seq: bool
     :param boot_layer: boot layer of memory.
@@ -2738,13 +2913,21 @@ def memory(name,
         boot_bias = ParamAttr.to_bias(boot_bias)
 
     assert boot_layer is None or isinstance(boot_layer, LayerOutput)
+    if name is not None:
+        memory_name = None
 
-    agent_name = Memory(name, size, is_seq, boot_layer.name
-                        if boot_layer is not None else None, boot_bias,
-                        boot_bias_active_type.name, boot_with_const_id)
+    memory_name = Memory(
+        name,
+        size,
+        is_sequence=is_seq,
+        boot_layer=boot_layer.name if boot_layer is not None else None,
+        boot_bias=boot_bias,
+        boot_bias_active_type=boot_bias_active_type.name,
+        boot_with_const_id=boot_with_const_id,
+        memory_name=memory_name)
 
     lout = LayerOutput(
-        name=agent_name,
+        name=memory_name,
         size=size,
         layer_type=LayerType.MEMORY,
         parents=[boot_layer] if boot_layer is not None else None)
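A sketch of the deferred-name form inside a recurrent step function; the sizes, names, and the surrounding recurrent_group are illustrative:

    def step(x):
        mem = memory(size=256)               # no name given yet
        state = fc_layer(input=[x, mem], size=256, act=TanhActivation())
        mem.set_input(state)                 # this memory now remembers `state`
        return state

    out = recurrent_group(step=step, input=emb)  # emb: a sequence layer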
@@ -3142,8 +3325,8 @@ def recurrent_group(step,
 
     assert (targetInlink == None or targetInlink_in_inlinks())
     targetInlinkName = None if targetInlink == None \
-                            else targetInlink.name if isinstance(targetInlink, LayerOutput) \
-                                                   else targetInlink.input.name
+        else targetInlink.name if isinstance(targetInlink, LayerOutput) \
+        else targetInlink.input.name
 
     contains_sub_seq = [False]
 
@@ -3492,7 +3675,7 @@ def __cost_input__(input, label, weight=None):
     ipts = [Input(input.name), Input(label.name)]
     parents = [input, label]
     if weight is not None:
-        assert weight.layer_type == LayerType.DATA
+        assert weight.size == 1
         ipts.append(Input(weight.name))
         parents.append(weight)
     return ipts, parents
@@ -3500,11 +3683,14 @@ def __cost_input__(input, label, weight=None):
 
 @wrap_name_default()
 @layer_support()
-def regression_cost(input, label, weight=None, name=None, layer_attr=None):
+def mse_cost(input, label, weight=None, name=None, layer_attr=None):
     """
-    Regression Layer.
+    Mean squared error cost:
+
+    ..  math::
+
+       \frac{1}{N}\sum_{i=1}^{N} (t_i - y_i)^2
 
-    TODO(yuyang18): Complete this method.
 
     :param name: layer name.
     :type name: basestring
@@ -3530,6 +3716,9 @@ def regression_cost(input, label, weight=None, name=None, layer_attr=None):
     return LayerOutput(name, LayerType.COST, parents=parents, size=1)
 
 
+regression_cost = mse_cost
+
+
 @wrap_name_default("cost")
 @layer_support()
 def classification_cost(input,
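Usage of the renamed cost; regression_cost remains as an alias, so existing configurations keep working. The layers are illustrative:

    pred = fc_layer(input=hidden, size=1, act=LinearActivation())
    cost = mse_cost(input=pred, label=label_layer)
    # cost = regression_cost(input=pred, label=label_layer)  # old name, same layer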
@@ -3600,7 +3789,8 @@ def conv_operator(img,
                   padding=0,
                   filter_size_y=None,
                   stride_y=None,
-                  padding_y=None):
+                  padding_y=None,
+                  trans=False):
     """
     Different from img_conv_layer, conv_op is an Operator, which can be used
     in mixed_layer. conv_op takes two inputs to perform the convolution.
@@ -3656,7 +3846,9 @@ def conv_operator(img,
     if filter.size is not None:
         filter.size = filter_size * filter_size_y * num_filters * num_channels
 
-    op = ConvOperator(
+    opCls = ConvTransOperator if trans else ConvOperator
+
+    op = opCls(
         input_layer_names=[img.name, filter.name],
         num_filters=num_filters,
         conv_conf=Conv(
@@ -3668,6 +3860,7 @@ def conv_operator(img,
             padding_y=padding_y,
             stride_y=stride_y,
             groups=1))
+
     op.origin = [img, filter]
     return op
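With trans=True, conv_operator builds the new ConvTransOperator. A sketch inside a mixed_layer; all shapes are illustrative and must satisfy the transposed-convolution geometry:

    op = conv_operator(img=img, filter=filt, filter_size=3, num_filters=64,
                       num_channels=32, stride=2, padding=1, trans=True)
    out = mixed_layer(input=[op], size=out_h * out_w * 64)
    # size = img_size_y * img_size * num_filters, per the new operator code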
 
@@ -3683,7 +3876,8 @@ def conv_projection(input,
                     stride_y=None,
                     padding_y=None,
                     groups=1,
-                    param_attr=None):
+                    param_attr=None,
+                    trans=False):
     """
     Different from img_conv_layer and conv_op, conv_projection is a Projection,
     which can be used in mixed_layer and concat_layer. It uses cudnn to implement
@@ -3722,6 +3916,8 @@ def conv_projection(input,
     :type groups: int
     :param param_attr: Convolution param attribute. None means default attribute
     :type param_attr: ParameterAttribute
+    :param trans: whether it is a transposed convolution (convTrans) or a
+                  plain convolution (conv).
+    :type trans: bool
     :return: A DotMulProjection Object.
     :rtype: DotMulProjection
     """
@@ -3758,7 +3954,9 @@ def conv_projection(input,
         param_attr.attr["initial_strategy"] = 0
         param_attr.attr["initial_smart"] = False
 
-    proj = ConvProjection(
+    projCls = ConvTransProjection if trans else ConvProjection
+
+    proj = projCls(
         input_layer_name=input.name,
         num_filters=num_filters,
         conv_conf=Conv(
@@ -3903,13 +4101,13 @@ def conv_shift_layer(a, b, name=None, layer_attr=None):
 
     .. code-block:: python
 
-       conv_shift = conv_shift_layer(input=[layer1, layer2])
+       conv_shift = conv_shift_layer(a=layer1, b=layer2)
 
     :param name: layer name
     :type name: basestring
     :param a: Input layer a.
     :type a: LayerOutput
-    :param b: input layer b
+    :param b: Input layer b.
     :type b: LayerOutput
     :param layer_attr: layer's extra attribute.
     :type layer_attr: ExtraLayerAttribute
@@ -4001,8 +4199,8 @@ def tensor_layer(a,
 @wrap_act_default()
 @layer_support()
 def selective_fc_layer(input,
-                       select,
                        size,
+                       select=None,
                        act=None,
                        name=None,
                        pass_generation=False,
@@ -4029,6 +4227,7 @@ def selective_fc_layer(input,
     :type input: LayerOutput|list|tuple
     :param select: The select layer. The output of select layer should be a
                    sparse binary matrix, and treat as the mask of selective fc.
+                   If it is None, this layer acts exactly like fc_layer.
     :type select: LayerOutput
     :param size: The layer dimension.
     :type size: int
@@ -4257,7 +4456,7 @@ def block_expand_layer(input,
 
     .. code-block:: python
 
-       block_expand = block_expand_layer(input,
+       block_expand = block_expand_layer(input=layer,
                                          num_channels=128,
                                          stride_x=1,
                                          stride_y=1,
@@ -4461,7 +4660,7 @@ def warp_ctc_layer(input,
         - You can set 'blank' to any value ranged in [0, num_classes], which
           should be consistent as that used in your labels.
         - As a native 'softmax' activation is integrated into the warp-ctc library,
-         'linear' activation is expected instead in the 'input' layer.
+          'linear' activation is expected instead in the 'input' layer.
 
     The simple usage:
 
@@ -4594,6 +4793,13 @@ def crf_decoding_layer(input,
     this layer will also calculate error. output.value[i] is 1 for incorrect
     decoding or 0 for correct decoding.
 
+    The simple usage:
+
+    .. code-block:: python
+
+      crf_decoding = crf_decoding_layer(input=input,
+                                        size=label_dim)
+
     :param input: The first input layer.
     :type input: LayerOutput
     :param size: size of this layer.
@@ -4632,12 +4838,14 @@ def crf_decoding_layer(input,
     return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1)
 
 
+@wrap_act_default(act=SigmoidActivation())
 @wrap_bias_attr_default(has_bias=True)
 @wrap_name_default()
 @layer_support()
 def nce_layer(input,
               label,
               num_classes,
+              act=None,
               weight=None,
               num_neg_samples=10,
               neg_distribution=None,
@@ -4666,6 +4874,8 @@ def nce_layer(input,
     :type weight: LayerOutput
     :param num_classes: number of classes.
     :type num_classes: int
+    :param act: Activation, default is Sigmoid.
+    :type act: BaseActivation
     :param num_neg_samples: number of negative samples. Default is 10.
     :type num_neg_samples: int
     :param neg_distribution: The distribution for generating the random negative labels.
@@ -4687,7 +4897,9 @@ def nce_layer(input,
     if neg_distribution is not None:
         assert isinstance(neg_distribution, collections.Sequence)
         assert len(neg_distribution) == num_classes
-        assert sum(neg_distribution) == 1
+        assert abs(sum(neg_distribution) - 1.0) < 1e-5
+    if not isinstance(act, BaseActivation):
+        raise TypeError("act should be an instance of BaseActivation")
 
     ipts_for_layer = []
     parents = []
@@ -4709,12 +4921,17 @@ def nce_layer(input,
         type=LayerType.NCE_LAYER,
         num_classes=num_classes,
         neg_sampling_dist=neg_distribution,
+        active_type=act.name,
         num_neg_samples=num_neg_samples,
         inputs=ipts_for_layer,
         bias=ParamAttr.to_bias(bias_attr),
         **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(
-        name, LayerType.NCE_LAYER, parents=parents, size=l.config.size)
+        name,
+        LayerType.NCE_LAYER,
+        parents=parents,
+        size=l.config.size,
+        activation=act)
 
 
 """
@@ -4859,7 +5076,12 @@ def lambda_cost(input,
 
 @wrap_name_default()
 @layer_support()
-def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None):
+def cross_entropy(input,
+                  label,
+                  name=None,
+                  coeff=1.0,
+                  weight=None,
+                  layer_attr=None):
     """
     A loss layer for multi-class cross entropy.
 
@@ -4874,22 +5096,27 @@ def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None):
     :type input: LayerOutput.
     :param name: The name of this layer. It is optional.
     :type name: None|basestring.
-    :param coeff: The coefficient affects the gradient in the backward.
+    :param coeff: The cost is multiplied by coeff.
+                  The coefficient affects the gradient in the backward.
     :type coeff: float.
+    :param weight: The cost of each sample is multiplied by its weight.
+                   The weight should be a layer with size=1. Note that the
+                   gradient will not be calculated for weight.
+    :type weight: LayerOutput
     :param layer_attr: Extra Layer Attribute.
     :type layer_attr: ExtraLayerAttribute
     :return: LayerOutput object.
     :rtype: LayerOutput.
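+
+    Usage sketch with per-sample weights (assuming ``prob``, ``lbl`` and
+    ``wt`` are layers defined elsewhere, with ``wt`` of size 1):
+
+    .. code-block:: python
+
+       cost = cross_entropy(input=prob, label=lbl, weight=wt)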
     """
 
+    ipts, parents = __cost_input__(input, label, weight)
     Layer(
         name=name,
         type=LayerType.CROSS_ENTROPY,
-        inputs=[input.name, label.name],
+        inputs=ipts,
         coeff=coeff,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
-    return LayerOutput(
-        name, LayerType.CROSS_ENTROPY, parents=[input, label], size=1)
+    return LayerOutput(name, LayerType.CROSS_ENTROPY, parents=parents, size=1)
 
 
 @wrap_name_default()
diff --git a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
index 3a1a0132b64bb928e857905f99c0be2e81ccbda2..3c6dbc95e54898ca1e44c3dc010c9fb73a3bee30 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
@@ -14,4 +14,7 @@ for op in seq_op:
     for al in agg_level:
         opts.append(op(input=din, agg_level=al))
 
+for op in seq_op:
+    opts.append(op(input=din, agg_level=AggregateLevel.EACH_TIMESTEP, stride=5))
+
 outputs(opts)
diff --git a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
index 3331c10d6497f58eb135208bd7abe48aacfb10ae..24c901c8ee3ab1c90fc14fbff761db06345a6313 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
@@ -7,8 +7,9 @@ x = layer_math.exp(x)
 x = layer_math.log(x)
 x = layer_math.abs(x)
 x = layer_math.sigmoid(x)
+x = layer_math.tanh(x)
 x = layer_math.square(x)
-x = layer_math.square(x)
+x = layer_math.relu(x)
 y = 1 + x
 y = y + 1
 y = x + y
diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.py b/python/paddle/trainer_config_helpers/tests/configs/projections.py
index aa4521dcd5db3f845871cfaaedb02a86bcbddc38..dc8975cb311582a621eb4a5a166ddc34348fe3e9 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/projections.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/projections.py
@@ -34,11 +34,31 @@ flt = data_layer(name='filter', size=3 * 3 * 1 * 64)
 with mixed_layer() as m7:
     m7 += conv_operator(
         img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)
+    m7 += conv_projection(img, filter_size=3, num_filters=64, num_channels=1)
 
+with mixed_layer() as m8:
+    m8 += conv_operator(
+        img=img,
+        filter=flt,
+        num_filters=64,
+        num_channels=1,
+        filter_size=3,
+        stride=2,
+        padding=1,
+        trans=True)
+    m8 += conv_projection(
+        img,
+        filter_size=3,
+        num_filters=64,
+        num_channels=1,
+        stride=2,
+        padding=1,
+        trans=True)
 end = mixed_layer(
     input=[
         full_matrix_projection(input=m5),
-        trans_full_matrix_projection(input=m6), full_matrix_projection(input=m7)
+        trans_full_matrix_projection(input=m6),
+        full_matrix_projection(input=m7), full_matrix_projection(input=m8)
     ],
     size=100,
     layer_attr=ExtraAttr(
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr
index 6934fd0da62f90f9bbddef9a98798bf168f7fa8e..2818389b16cca75f5030b75fc4de8c89c06c5e02 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr
@@ -33,6 +33,8 @@ layers {
   bias_parameter_name: "___conv_0__.wbias"
   num_filters: 64
   shared_biases: true
+  height: 256
+  width: 256
 }
 layers {
   name: "__batch_norm_0__"
@@ -58,6 +60,8 @@ layers {
   }
   bias_parameter_name: "___batch_norm_0__.wbias"
   moving_average_fraction: 0.9
+  height: 256
+  width: 256
 }
 layers {
   name: "__crmnorm_0__"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr
index 7b2911f8e367ebf9d0797e815a7532c714ef698e..12b2255f3a41119792d0f993ce2e03ce9ee3e994 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr
@@ -15,6 +15,7 @@ layers {
   }
   select_first: true
   trans_type: "seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__first_seq_1__"
@@ -26,6 +27,7 @@ layers {
   }
   select_first: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_0__"
@@ -36,6 +38,7 @@ layers {
     input_layer_name: "data"
   }
   trans_type: "seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_1__"
@@ -46,12 +49,38 @@ layers {
     input_layer_name: "data"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
+}
+layers {
+  name: "__first_seq_2__"
+  type: "seqlastins"
+  size: 30
+  active_type: "linear"
+  inputs {
+    input_layer_name: "data"
+  }
+  select_first: true
+  trans_type: "non-seq"
+  seq_pool_stride: 5
+}
+layers {
+  name: "__last_seq_2__"
+  type: "seqlastins"
+  size: 30
+  active_type: "linear"
+  inputs {
+    input_layer_name: "data"
+  }
+  trans_type: "non-seq"
+  seq_pool_stride: 5
 }
 input_layer_names: "data"
 output_layer_names: "__first_seq_0__"
 output_layer_names: "__first_seq_1__"
 output_layer_names: "__last_seq_0__"
 output_layer_names: "__last_seq_1__"
+output_layer_names: "__first_seq_2__"
+output_layer_names: "__last_seq_2__"
 sub_models {
   name: "root"
   layer_names: "data"
@@ -59,11 +88,15 @@ sub_models {
   layer_names: "__first_seq_1__"
   layer_names: "__last_seq_0__"
   layer_names: "__last_seq_1__"
+  layer_names: "__first_seq_2__"
+  layer_names: "__last_seq_2__"
   input_layer_names: "data"
   output_layer_names: "__first_seq_0__"
   output_layer_names: "__first_seq_1__"
   output_layer_names: "__last_seq_0__"
   output_layer_names: "__last_seq_1__"
+  output_layer_names: "__first_seq_2__"
+  output_layer_names: "__last_seq_2__"
   is_recurrent_layer_group: false
 }
 
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr
index da8da1b541f37a09654202f68232b99e4dac9f61..9b8a2ad9687d313e6c5017c2d7331eddf539af92 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr
@@ -65,13 +65,28 @@ layers {
     }
   }
 }
+layers {
+  name: "__tanh_0__"
+  type: "mixed"
+  size: 100
+  active_type: "tanh"
+  inputs {
+    input_layer_name: "__sigmoid_0__"
+    proj_conf {
+      type: "identity"
+      name: "___tanh_0__.w0"
+      input_size: 100
+      output_size: 100
+    }
+  }
+}
 layers {
   name: "__square_0__"
   type: "mixed"
   size: 100
   active_type: "square"
   inputs {
-    input_layer_name: "__sigmoid_0__"
+    input_layer_name: "__tanh_0__"
     proj_conf {
       type: "identity"
       name: "___square_0__.w0"
@@ -81,15 +96,15 @@ layers {
   }
 }
 layers {
-  name: "__square_1__"
+  name: "__relu_0__"
   type: "mixed"
   size: 100
-  active_type: "square"
+  active_type: "relu"
   inputs {
     input_layer_name: "__square_0__"
     proj_conf {
       type: "identity"
-      name: "___square_1__.w0"
+      name: "___relu_0__.w0"
       input_size: 100
       output_size: 100
     }
@@ -101,7 +116,7 @@ layers {
   size: 100
   active_type: ""
   inputs {
-    input_layer_name: "__square_1__"
+    input_layer_name: "__relu_0__"
   }
   slope: 1.0
   intercept: 1
@@ -123,7 +138,7 @@ layers {
   size: 100
   active_type: ""
   inputs {
-    input_layer_name: "__square_1__"
+    input_layer_name: "__relu_0__"
     proj_conf {
       type: "identity"
       name: "___mixed_0__.w0"
@@ -147,7 +162,7 @@ layers {
   size: 100
   active_type: ""
   inputs {
-    input_layer_name: "__square_1__"
+    input_layer_name: "__relu_0__"
   }
   slope: -1.0
   intercept: 0.0
@@ -339,8 +354,9 @@ sub_models {
   layer_names: "__log_0__"
   layer_names: "__abs_0__"
   layer_names: "__sigmoid_0__"
+  layer_names: "__tanh_0__"
   layer_names: "__square_0__"
-  layer_names: "__square_1__"
+  layer_names: "__relu_0__"
   layer_names: "__slope_intercept_layer_0__"
   layer_names: "__slope_intercept_layer_1__"
   layer_names: "__mixed_0__"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr
index 2943ab130bd7d6f3b78ea611f1c35850ccaf5e92..2afc3afef6d39ce9b8eef05948861284775d5011 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr
@@ -154,13 +154,40 @@ layers {
   inputs {
     input_layer_name: "img"
   }
+  inputs {
+    input_layer_name: "img"
+    input_parameter_name: "___mixed_6__.w1"
+    proj_conf {
+      type: "conv"
+      name: "___mixed_6__.w1"
+      input_size: 1024
+      output_size: 57600
+      conv_conf {
+        filter_size: 3
+        channels: 1
+        stride: 1
+        padding: 0
+        groups: 1
+        filter_channels: 1
+        output_x: 30
+        img_size: 32
+        caffe_mode: true
+        filter_size_y: 3
+        padding_y: 0
+        stride_y: 1
+        output_y: 30
+        img_size_y: 32
+      }
+      num_filters: 64
+    }
+  }
   inputs {
     input_layer_name: "filter"
   }
   operator_confs {
     type: "conv"
     input_indices: 0
-    input_indices: 1
+    input_indices: 2
     input_sizes: 1024
     input_sizes: 576
     output_size: 57600
@@ -186,38 +213,112 @@ layers {
 layers {
   name: "__mixed_7__"
   type: "mixed"
+  size: 254016
+  active_type: ""
+  inputs {
+    input_layer_name: "img"
+  }
+  inputs {
+    input_layer_name: "img"
+    input_parameter_name: "___mixed_7__.w1"
+    proj_conf {
+      type: "convt"
+      name: "___mixed_7__.w1"
+      input_size: 1024
+      output_size: 254016
+      conv_conf {
+        filter_size: 3
+        channels: 1
+        stride: 2
+        padding: 1
+        groups: 1
+        filter_channels: 64
+        output_x: 32
+        img_size: 63
+        caffe_mode: true
+        filter_size_y: 3
+        padding_y: 1
+        stride_y: 2
+        output_y: 32
+        img_size_y: 63
+      }
+      num_filters: 64
+    }
+  }
+  inputs {
+    input_layer_name: "filter"
+  }
+  operator_confs {
+    type: "convt"
+    input_indices: 0
+    input_indices: 2
+    input_sizes: 1024
+    input_sizes: 576
+    output_size: 254016
+    conv_conf {
+      filter_size: 3
+      channels: 1
+      stride: 2
+      padding: 1
+      groups: 1
+      filter_channels: 64
+      output_x: 32
+      img_size: 63
+      caffe_mode: true
+      filter_size_y: 3
+      padding_y: 1
+      stride_y: 2
+      output_y: 32
+      img_size_y: 63
+    }
+    num_filters: 64
+  }
+}
+layers {
+  name: "__mixed_8__"
+  type: "mixed"
   size: 100
   active_type: ""
   inputs {
     input_layer_name: "__mixed_4__"
-    input_parameter_name: "___mixed_7__.w0"
+    input_parameter_name: "___mixed_8__.w0"
     proj_conf {
       type: "fc"
-      name: "___mixed_7__.w0"
+      name: "___mixed_8__.w0"
       input_size: 300
       output_size: 100
     }
   }
   inputs {
     input_layer_name: "__mixed_5__"
-    input_parameter_name: "___mixed_7__.w1"
+    input_parameter_name: "___mixed_8__.w1"
     proj_conf {
       type: "trans_fc"
-      name: "___mixed_7__.w1"
+      name: "___mixed_8__.w1"
       input_size: 100
       output_size: 100
     }
   }
   inputs {
     input_layer_name: "__mixed_6__"
-    input_parameter_name: "___mixed_7__.w2"
+    input_parameter_name: "___mixed_8__.w2"
     proj_conf {
       type: "fc"
-      name: "___mixed_7__.w2"
+      name: "___mixed_8__.w2"
       input_size: 57600
       output_size: 100
     }
   }
+  inputs {
+    input_layer_name: "__mixed_7__"
+    input_parameter_name: "___mixed_8__.w3"
+    proj_conf {
+      type: "fc"
+      name: "___mixed_8__.w3"
+      input_size: 254016
+      output_size: 100
+    }
+  }
   drop_rate: 0.5
 }
 parameters {
@@ -281,7 +382,23 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w0"
+  name: "___mixed_6__.w1"
+  size: 576
+  initial_mean: 0.0
+  initial_std: 0.471404520791
+  initial_strategy: 0
+  initial_smart: false
+}
+parameters {
+  name: "___mixed_7__.w1"
+  size: 576
+  initial_mean: 0.0
+  initial_std: 0.471404520791
+  initial_strategy: 0
+  initial_smart: false
+}
+parameters {
+  name: "___mixed_8__.w0"
   size: 30000
   initial_mean: 0.0
   initial_std: 0.057735026919
@@ -291,7 +408,7 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w1"
+  name: "___mixed_8__.w1"
   size: 10000
   initial_mean: 0.0
   initial_std: 0.1
@@ -301,7 +418,7 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w2"
+  name: "___mixed_8__.w2"
   size: 5760000
   initial_mean: 0.0
   initial_std: 0.00416666666667
@@ -310,10 +427,20 @@ parameters {
   initial_strategy: 0
   initial_smart: true
 }
+parameters {
+  name: "___mixed_8__.w3"
+  size: 25401600
+  initial_mean: 0.0
+  initial_std: 0.00198412698413
+  dims: 254016
+  dims: 100
+  initial_strategy: 0
+  initial_smart: true
+}
 input_layer_names: "test"
 input_layer_names: "img"
 input_layer_names: "filter"
-output_layer_names: "__mixed_7__"
+output_layer_names: "__mixed_8__"
 sub_models {
   name: "root"
   layer_names: "test"
@@ -328,10 +455,11 @@ sub_models {
   layer_names: "filter"
   layer_names: "__mixed_6__"
   layer_names: "__mixed_7__"
+  layer_names: "__mixed_8__"
   input_layer_names: "test"
   input_layer_names: "img"
   input_layer_names: "filter"
-  output_layer_names: "__mixed_7__"
+  output_layer_names: "__mixed_8__"
   is_recurrent_layer_group: false
 }
 
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
index b6905824f0cb090375a38ff67e39fc626df0b2f6..64530146a1458933d4ba0edffc1b1b7e60a21187 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
@@ -128,6 +128,7 @@ layers {
     input_layer_name: "__simple_gru_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_1__"
@@ -138,6 +139,7 @@ layers {
     input_layer_name: "__simple_gru_1__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__fc_layer_0__"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr
index 0a83499b724806666a241489467207f3c7151a3a..79fa4c74f081aebadd258e06333de9eafe6a5ee3 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr
@@ -210,6 +210,7 @@ layers {
     input_layer_name: "__lstm_group_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_1__"
@@ -220,6 +221,7 @@ layers {
     input_layer_name: "__lstm_group_1__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__fc_layer_0__"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr
index dacb40185f863025528c2d4eeb8b325425953a93..68fa881b4f1408b8cd20f2417062ce035c0fda54 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr
@@ -143,6 +143,7 @@ layers {
     input_layer_name: "__recurrent_layer_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__first_seq_0__"
@@ -154,6 +155,7 @@ layers {
   }
   select_first: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_1__"
@@ -164,6 +166,7 @@ layers {
     input_layer_name: "__lstmemory_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__first_seq_1__"
@@ -175,6 +178,7 @@ layers {
   }
   select_first: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__last_seq_2__"
@@ -185,6 +189,7 @@ layers {
     input_layer_name: "__gru_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__first_seq_2__"
@@ -196,6 +201,7 @@ layers {
   }
   select_first: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 parameters {
   name: "___fc_layer_0__.w0"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr
index 10e59e21bc7a48bc53fb535f86f053c91f57c1df..05fd1c99d2db6e9faa3b3884ec9baf051791f9fe 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr
@@ -239,9 +239,9 @@ parameters {
   name: "___crf_layer_0__.w0"
   size: 24
   initial_mean: 0.0
-  initial_std: 0.5
-  dims: 4
+  initial_std: 0.408248290464
   dims: 6
+  dims: 4
   initial_strategy: 0
   initial_smart: true
 }
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr
index 811b38ae4a51e8faedb59fea2b81a8be3cceeae6..3244181a63109335c4fba6ca4dd04ac8f0446313 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr
@@ -45,7 +45,7 @@ layers {
   coeff: 1.0
 }
 layers {
-  name: "__regression_cost_0__"
+  name: "__mse_cost_0__"
   type: "square_error"
   size: 1
   active_type: ""
@@ -84,7 +84,7 @@ input_layer_names: "input"
 input_layer_names: "label"
 input_layer_names: "weight"
 output_layer_names: "__cost_0__"
-output_layer_names: "__regression_cost_0__"
+output_layer_names: "__mse_cost_0__"
 evaluators {
   name: "classification_error_evaluator"
   type: "classification_error"
@@ -99,12 +99,12 @@ sub_models {
   layer_names: "weight"
   layer_names: "__fc_layer_0__"
   layer_names: "__cost_0__"
-  layer_names: "__regression_cost_0__"
+  layer_names: "__mse_cost_0__"
   input_layer_names: "input"
   input_layer_names: "label"
   input_layer_names: "weight"
   output_layer_names: "__cost_0__"
-  output_layer_names: "__regression_cost_0__"
+  output_layer_names: "__mse_cost_0__"
   evaluator_names: "classification_error_evaluator"
   is_recurrent_layer_group: false
 }
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
index 3e9d28416ed5066461e960f0a9f085e057c28346..77b447aa9db2a6c323fd3c322e7e9ca1ed19a6dd 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
@@ -96,6 +96,7 @@ layers {
     input_layer_name: "rnn_forward"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__recurrent_group_1__"
@@ -145,6 +146,7 @@ layers {
   }
   select_first: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__recurrent_group_2__"
@@ -193,6 +195,7 @@ layers {
     input_layer_name: "rnn_subseq_forward"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__lstm_group_0___recurrent_group"
@@ -282,6 +285,7 @@ layers {
     input_layer_name: "__lstm_group_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__gru_group_0___recurrent_group"
@@ -330,6 +334,56 @@ layers {
     input_layer_name: "__gru_group_0__"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
+}
+layers {
+  name: "__recurrent_group_3__"
+  type: "recurrent_layer_group"
+  active_type: ""
+}
+layers {
+  name: "seq_input@__recurrent_group_3__"
+  type: "scatter_agent"
+  size: 100
+  active_type: ""
+}
+layers {
+  name: "__memory_6__@__recurrent_group_3__"
+  type: "agent"
+  size: 200
+  active_type: ""
+}
+layers {
+  name: "__fc_layer_0__@__recurrent_group_3__"
+  type: "fc"
+  size: 200
+  active_type: "tanh"
+  inputs {
+    input_layer_name: "seq_input@__recurrent_group_3__"
+    input_parameter_name: "___fc_layer_0__@__recurrent_group_3__.w0"
+  }
+  inputs {
+    input_layer_name: "__memory_6__@__recurrent_group_3__"
+    input_parameter_name: "___fc_layer_0__@__recurrent_group_3__.w1"
+  }
+  bias_parameter_name: "___fc_layer_0__@__recurrent_group_3__.wbias"
+}
+layers {
+  name: "__fc_layer_0__"
+  type: "gather_agent"
+  size: 200
+  active_type: ""
+}
+layers {
+  name: "__last_seq_4__"
+  type: "seqlastins"
+  size: 200
+  active_type: "linear"
+  inputs {
+    input_layer_name: "__fc_layer_0__"
+  }
+  trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 parameters {
   name: "___mixed_0__.w0"
@@ -481,6 +535,36 @@ parameters {
   initial_strategy: 0
   initial_smart: false
 }
+parameters {
+  name: "___fc_layer_0__@__recurrent_group_3__.w0"
+  size: 20000
+  initial_mean: 0.0
+  initial_std: 0.1
+  dims: 100
+  dims: 200
+  initial_strategy: 0
+  initial_smart: true
+}
+parameters {
+  name: "___fc_layer_0__@__recurrent_group_3__.w1"
+  size: 40000
+  initial_mean: 0.0
+  initial_std: 0.0707106781187
+  dims: 200
+  dims: 200
+  initial_strategy: 0
+  initial_smart: true
+}
+parameters {
+  name: "___fc_layer_0__@__recurrent_group_3__.wbias"
+  size: 200
+  initial_mean: 0.0
+  initial_std: 0.0
+  dims: 1
+  dims: 200
+  initial_strategy: 0
+  initial_smart: false
+}
 input_layer_names: "seq_input"
 input_layer_names: "sub_seq_input"
 output_layer_names: "__last_seq_0__"
@@ -488,6 +572,7 @@ output_layer_names: "__first_seq_0__"
 output_layer_names: "__last_seq_1__"
 output_layer_names: "__last_seq_2__"
 output_layer_names: "__last_seq_3__"
+output_layer_names: "__last_seq_4__"
 sub_models {
   name: "root"
   layer_names: "seq_input"
@@ -510,6 +595,9 @@ sub_models {
   layer_names: "__gru_group_0___recurrent_group"
   layer_names: "__gru_group_0__"
   layer_names: "__last_seq_3__"
+  layer_names: "__recurrent_group_3__"
+  layer_names: "__fc_layer_0__"
+  layer_names: "__last_seq_4__"
   input_layer_names: "seq_input"
   input_layer_names: "sub_seq_input"
   output_layer_names: "__last_seq_0__"
@@ -517,6 +605,7 @@ sub_models {
   output_layer_names: "__last_seq_1__"
   output_layer_names: "__last_seq_2__"
   output_layer_names: "__last_seq_3__"
+  output_layer_names: "__last_seq_4__"
   is_recurrent_layer_group: false
 }
 sub_models {
@@ -647,4 +736,28 @@ sub_models {
   }
   target_inlinkid: -1
 }
+sub_models {
+  name: "__recurrent_group_3__"
+  layer_names: "seq_input@__recurrent_group_3__"
+  layer_names: "__memory_6__@__recurrent_group_3__"
+  layer_names: "__fc_layer_0__@__recurrent_group_3__"
+  is_recurrent_layer_group: true
+  reversed: false
+  memories {
+    layer_name: "__fc_layer_0__@__recurrent_group_3__"
+    link_name: "__memory_6__@__recurrent_group_3__"
+    is_sequence: false
+  }
+  in_links {
+    layer_name: "seq_input"
+    link_name: "seq_input@__recurrent_group_3__"
+    has_subseq: false
+  }
+  out_links {
+    layer_name: "__fc_layer_0__@__recurrent_group_3__"
+    link_name: "__fc_layer_0__"
+    has_subseq: false
+  }
+  target_inlinkid: -1
+}
 
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
index d30f70a55c5b1834074966dfb3f378e01447c8ab..1c0aa7f9b9ee45b9eaf82dc46a2648d834dcd4ad 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
@@ -10,5 +10,5 @@ fc = fc_layer(input=data, size=10, act=SoftmaxActivation())
 outputs(
     classification_cost(
         input=fc, label=lbl, weight=wt),
-    regression_cost(
+    mse_cost(
         input=fc, label=lbl, weight=wt))
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
index 60b4849d69d497109ef5af3257e212df233a2d0b..91010759e4847f087eb4e05ad98ae794a2129365 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
@@ -16,6 +16,16 @@ def generate_rnn_simple(name):
     return rnn_simple
 
 
+def generate_rnn_simple_no_name():
+    def rnn_simple(s):
+        m = memory(name=None, size=200)
+        fc = fc_layer(input=[s, m], size=200)
+        m.set_input(fc)
+        return fc
+
+    return rnn_simple
+
+
 with mixed_layer() as lstm_param:  # test lstm unit, rnn group
     lstm_param += full_matrix_projection(input=seq, size=100 * 4)
 
@@ -33,4 +43,6 @@ outputs(
     last_seq(input=lstmemory_group(
         input=lstm_param, size=100)),
     last_seq(input=gru_group(
-        input=gru_param, size=100)))
+        input=gru_param, size=100)),
+    last_seq(input=recurrent_group(
+        step=generate_rnn_simple_no_name(), input=seq)), )
diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py
index 0cf7b8e9039f7393ae9fcf73faeeeb8fbf11df31..7c8f6ea62fcb74700f7356ed4b937a3aaa1c7092 100644
--- a/python/paddle/v2/__init__.py
+++ b/python/paddle/v2/__init__.py
@@ -18,12 +18,25 @@ import parameters
 import trainer
 import event
 import data_type
+import topology
+import data_feeder
+import networks
+import evaluator
+from . import dataset
+from . import reader
+from . import plot
 import attr
+import pooling
+import inference
 import py_paddle.swig_paddle as api
+import minibatch
 
 __all__ = [
     'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
-    'event', 'data_type', 'attr'
+    'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
+    'topology', 'networks', 'infer', 'plot', 'evaluator'
 ]
 
 
@@ -33,3 +46,7 @@ def init(**kwargs):
         args.append('--%s=%s' % (key, str(kwargs[key])))
 
     api.initPaddle(*args)
+
+
+infer = inference.infer
+batch = minibatch.batch
diff --git a/python/paddle/v2/activation.py b/python/paddle/v2/activation.py
index 1f3aab9ef3c5f69e22d7e83250d0ff46c1ff718a..21261a178203b633ca6cf59a5fc89edc24a868b9 100644
--- a/python/paddle/v2/activation.py
+++ b/python/paddle/v2/activation.py
@@ -12,26 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle.trainer_config_helpers.activations import *
+import paddle.trainer_config_helpers.activations
+import copy
 
-__all__ = [
-    "Base", "Tanh", "Sigmoid", "Softmax", "Identity", "Linear",
-    'SequenceSoftmax', "Exp", "Relu", "BRelu", "SoftRelu", "STanh", "Abs",
-    "Square", "Log"
-]
+__all__ = []
 
-Base = BaseActivation
-Tanh = TanhActivation
-Sigmoid = SigmoidActivation
-Softmax = SoftmaxActivation
-SequenceSoftmax = SequenceSoftmaxActivation
-Identity = IdentityActivation
-Linear = Identity
-Relu = ReluActivation
-BRelu = BReluActivation
-SoftRelu = SoftReluActivation
-STanh = STanhActivation
-Abs = AbsActivation
-Square = SquareActivation
-Exp = ExpActivation
-Log = LogActivation
+suffix = 'Activation'
+for act in paddle.trainer_config_helpers.activations.__all__:
+    new_name = act[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.activations, act))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
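+
+# After this loop, every activation from trainer_config_helpers is exposed
+# here without the 'Activation' suffix, e.g. paddle.v2.activation.Tanh is a
+# renamed copy of paddle.trainer_config_helpers.activations.TanhActivation.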
diff --git a/python/paddle/v2/attr.py b/python/paddle/v2/attr.py
index 40c64f621b443d5613468b27d030ab08776641b2..32f78614e7f8abe7cffdc7a50a9fa77f1fc1a780 100644
--- a/python/paddle/v2/attr.py
+++ b/python/paddle/v2/attr.py
@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle.trainer_config_helpers.attrs import *
+import paddle.trainer_config_helpers.attrs
 
 __all__ = [
     "Param",
     "Extra",
 ]
 
-Param = ParameterAttribute
-Extra = ExtraLayerAttribute
+Param = paddle.trainer_config_helpers.attrs.ParameterAttribute
+Extra = paddle.trainer_config_helpers.attrs.ExtraLayerAttribute
+
+for each in paddle.trainer_config_helpers.attrs.__all__:
+    globals()[each] = getattr(paddle.trainer_config_helpers.attrs, each)
+    __all__.append(each)
diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e8da563e0d65d534d3f224fe5f1c39a67eeb54
--- /dev/null
+++ b/python/paddle/v2/config_base.py
@@ -0,0 +1,214 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import re
+from paddle.trainer_config_helpers.default_decorators import wrap_name_default
+import paddle.trainer_config_helpers as conf_helps
+
+
+class LayerType(type):
+    def __new__(cls, name, bases, attrs):
+        method_name = attrs.get('METHOD_NAME', None)
+        if method_name is not None:
+            method = getattr(conf_helps, method_name)
+            if method.__doc__ is not None:
+                mapper = attrs.get("__map_docstr__", None)
+                if mapper is not None:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        mapper(method.__doc__),
+                        method_name=method_name,
+                        name=name)
+                else:
+                    attrs['__doc__'] = LayerType.__map_docstr__(
+                        method.__doc__, method_name=method_name, name=name)
+        return super(LayerType, cls).__new__(cls, name, bases, attrs)
+
+    @staticmethod
+    def __map_docstr__(doc, name, method_name):
+        assert isinstance(doc, basestring)
+
+        # replace LayerOutput with paddle.v2.config_base.Layer
+        doc = doc.replace("LayerOutput", "paddle.v2.config_base.Layer")
+
+        doc = doc.replace('ParameterAttribute',
+                          'paddle.v2.attr.ParameterAttribute')
+
+        doc = re.sub(r'ExtraLayerAttribute[^\s]?',
+                     'paddle.v2.attr.ExtraAttribute', doc)
+
+        # xxx_layer to xxx
+        doc = re.sub(r"(?P[a-z]+)_layer", r"\g", doc)
+
+        # XxxxActivation to paddle.v2.Activation.Xxxx
+        doc = re.sub(r"(?P[A-Z][a-zA-Z]+)Activation",
+                     r"paddle.v2.Activation.\g", doc)
+
+        # TODO(yuyang18): Add more rules if needed.
+        return doc
+
+
+class Layer(object):
+    __metaclass__ = LayerType
+
+    def __init__(self, name=None, parent_layers=None):
+        assert isinstance(parent_layers, dict)
+        self.name = name
+        self.__context__ = {}
+        self.__parent_layers__ = parent_layers
+        # some layers may have extra parent layers
+        self.__extra_parent__ = []
+        # used for evaluator.
+        self.__children_layers__ = []
+
+    def extra_parent(self):
+        return self.__extra_parent__
+
+    def append_extra_parent(self, parent):
+        self.__extra_parent__.append(parent)
+
+    def append_child(self, layer, parent_names):
+        self.__children_layers__.append((layer, parent_names))
+
+    def to_proto(self, context):
+        """
+        Convert this layer to its proto (v1 layer) representation, caching
+        the result in the given context.
+        """
+        self.__context__ = context
+
+        # STEP: short cut if this layer is parsed before.
+        if self.context_name() in context:
+            if self.use_context_name():
+                return context[self.context_name()]
+            else:
+                return context[self.name]
+
+        # STEP: parse extra_parent that is not used by this layer but must
+        # be parsed before this layer.
+        for p in self.__extra_parent__:
+            p.to_proto(context=context)
+
+        # STEP: parse parent that is used by this layer, get the result and
+        # insert into kwargs of the next layer's to_proto_impl method.
+        kwargs = dict()
+        for layer_name in self.__parent_layers__:
+            if not isinstance(self.__parent_layers__[layer_name],
+                              collections.Sequence):
+                v1_layer = self.__parent_layers__[layer_name].to_proto(
+                    context=context)
+            else:
+                v1_layer = map(lambda x: x.to_proto(context=context),
+                               self.__parent_layers__[layer_name])
+            kwargs[layer_name] = v1_layer
+
+        # STEP: parse myself and add myself into context.
+        ret_val = self.to_proto_impl(**kwargs)
+        if self.context_name() is not None \
+                and self.context_name() not in context:
+            context[self.context_name()] = ret_val
+
+        # STEP: parse children that should be parsed after this layer.
+        for layer, pnames in self.__children_layers__:
+            drop = False
+
+            # child will only be parsed if all parents are in context.
+            for pname in pnames:
+                if pname not in context:
+                    drop = True
+                    break
+            if drop:
+                continue
+            layer.to_proto(context=context)
+
+        # STEP: return v1 layer result
+        if self.context_name() is None:
+            return ret_val
+        elif self.use_context_name():
+            return context[self.context_name()]
+        else:
+            return context[self.name]
+
+    def to_proto_impl(self, **kwargs):
+        raise NotImplementedError()
+
+    def context_name(self):
+        """
+        Context name means the context which stores `to_proto_impl` result.
+        If multiple layer share same context_name, the `to_proto_impl` of them
+        will be invoked only once.
+        """
+        return self.name
+
+    def use_context_name(self):
+        return False
+
+    def calculate_size(self):
+        """
+        Lazily calculate the size of this layer; should only be called after
+        to_proto_impl of this layer has been invoked.
+        :return: the size recorded in the parsing context.
+        """
+        return self.__context__[self.context_name()].size
+
+
+def __convert_to_v2__(method_name,
+                      parent_names,
+                      is_default_name=True,
+                      attach_parent=False):
+    if is_default_name:
+        wrapper = wrap_name_default(name_prefix=method_name)
+    else:
+        wrapper = None
+
+    class V2LayerImpl(Layer):
+        METHOD_NAME = method_name
+
+        def __init__(self, **kwargs):
+            parent_layers = dict()
+            other_kwargs = dict()
+            for pname in parent_names:
+                if pname in kwargs:
+                    parent_layers[pname] = kwargs[pname]
+
+            if attach_parent:
+                pnames = [x.context_name() for x in parent_layers.values()]
+
+                for pname in parent_layers:
+                    layers = kwargs[pname]
+                    if not isinstance(layers, collections.Sequence):
+                        layers = [layers]
+
+                    for layer in layers:
+                        layer.append_child(self, pnames)
+
+            for key in kwargs.keys():
+                if key not in parent_names:
+                    other_kwargs[key] = kwargs[key]
+
+            name = kwargs.get('name', None)
+            super(V2LayerImpl, self).__init__(name, parent_layers)
+            self.__other_kwargs__ = other_kwargs
+
+        if wrapper is not None:
+            __init__ = wrapper(__init__)
+
+        def to_proto_impl(self, **kwargs):
+            args = dict()
+            for each in kwargs:
+                args[each] = kwargs[each]
+            for each in self.__other_kwargs__:
+                args[each] = self.__other_kwargs__[each]
+            return getattr(conf_helps, method_name)(**args)
+
+    return V2LayerImpl
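+
+
+# Usage sketch (hypothetical; the actual call sites live in paddle.v2.layer):
+# wrap a v1 config helper into a v2 Layer class, naming which keyword
+# arguments are parent layers:
+#
+#   fc = __convert_to_v2__('fc_layer', parent_names=['input'])
+#   hidden = fc(input=some_layer, size=100)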
diff --git a/python/paddle/v2/data_feeder.py b/python/paddle/v2/data_feeder.py
new file mode 100644
index 0000000000000000000000000000000000000000..2698251b9e15046eb14f71c3f5b0546ecbb4a5dd
--- /dev/null
+++ b/python/paddle/v2/data_feeder.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from py_paddle import DataProviderConverter
+import collections
+import paddle.trainer.PyDataProvider2 as pydp2
+
+__all__ = ['DataFeeder']
+
+
+def default_feeding_map(data_types):
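+    """
+    Map each data name in data_types to its position within a sample,
+    following the declaration order of data_types.
+    """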
+    reader_dict = dict()
+    for i, tp in enumerate(data_types):
+        reader_dict[tp[0]] = i
+    return reader_dict
+
+
+class DataFeeder(DataProviderConverter):
+    """
+    DataFeeder converts the data returned by paddle.reader into a data structure
+    of Arguments which is defined in the API. The paddle.reader usually returns
+    a list of mini-batch data entries. Each data entry in the list is one sample.
+    Each sample is a list or a tuple with one feature or multiple features.
+    DataFeeder converts these mini-batch data entries into Arguments in order
+    to feed them to the C++ interface.
+
+    The simple usage is shown below
+
+    ..  code-block:: python
+
+        feeding = ['image', 'label']
+        data_types = enumerate_data_types_of_data_layers(topology)
+        feeder = DataFeeder(data_types=data_types, feeding=feeding)
+
+        minibatch_data = [([1.0, 2.0, 3.0, ...], 5)]
+
+        arg = feeder(minibatch_data)
+
+
+    If the mini-batch data and the data layers do not map one to one, we
+    can pass a dictionary to the feeding parameter to describe the mapping
+    relationship.
+
+
+    ..  code-block:: python
+
+        data_types = [('image', paddle.data_type.dense_vector(784)),
+                      ('label', paddle.data_type.integer_value(10))]
+        feeding = {'image':0, 'label':1}
+        feeder = DataFeeder(data_types=data_types, feeding=feeding)
+        minibatch_data = [
+                           ( [1.0,2.0,3.0,4.0], 5, [6,7,8] ),  # first sample
+                           ( [1.0,2.0,3.0,4.0], 5, [6,7,8] )   # second sample
+                         ]
+        # or minibatch_data = [
+        #                       [ [1.0,2.0,3.0,4.0], 5, [6,7,8] ],  # first sample
+        #                       [ [1.0,2.0,3.0,4.0], 5, [6,7,8] ]   # second sample
+        #                     ]
+        arg = feeder.convert(minibatch_data)
+
+    ..  note::
+
+        This module is for internal use only. Users should use the `reader`
+        interface.
+
+
+
+    :param data_types: A list to specify data name and type. Each item is
+                       a tuple of (data_name, data_type).
+
+    :type data_types: list
+    :param feeding: A dictionary or a sequence to specify the position of each
+                    data in the input data.
+    :type feeding: dict|collections.Sequence|None
+    """
+
+    def __init__(self, data_types, feeding=None):
+        self.input_names = []
+        input_types = []
+        if feeding is None:
+            feeding = default_feeding_map(data_types)
+        elif isinstance(feeding, collections.Sequence):
+            feed_list = feeding
+            feeding = dict()
+            for i, name in enumerate(feed_list):
+                feeding[name] = i
+        elif not isinstance(feeding, dict):
+            raise TypeError("Feeding should be dict or sequence or None.")
+
+        self.feeding = feeding
+        for each in data_types:
+            self.input_names.append(each[0])
+            if not isinstance(each[1], pydp2.InputType):
+                raise TypeError("second item in each data_type should be an "
+                                "InputType")
+            input_types.append(each[1])
+        DataProviderConverter.__init__(self, input_types)
+
+    def __len__(self):
+        return len(self.input_names)
+
+    def convert(self, dat, argument=None):
+        """
+        :param dat: A list of mini-batch data. Each sample is a list or tuple
+                    with one feature or multiple features.
+
+        :type dat: list
+        :param argument: An Arguments object that contains this mini-batch data
+                         with one or multiple features. The Arguments definition
+                         is in the API.
+        :type argument: py_paddle.swig_paddle.Arguments
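+        :return: An Arguments object that holds the converted mini-batch,
+                 ready to be fed to the C++ core.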
+        """
+
+        def reorder_data(data):
+            retv = []
+            for each in data:
+                reorder = []
+                for name in self.input_names:
+                    reorder.append(each[self.feeding[name]])
+                retv.append(reorder)
+            return retv
+
+        return DataProviderConverter.convert(self, reorder_data(dat), argument)
diff --git a/python/paddle/v2/data_type.py b/python/paddle/v2/data_type.py
index 5b01ba4cd4866cf7b355fc0a6a667409cf9c4419..d582f76ddf01ed3430a1d075624bbb8e0bf3f2a9 100644
--- a/python/paddle/v2/data_type.py
+++ b/python/paddle/v2/data_type.py
@@ -12,11 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle.trainer.PyDataProvider2 import \
-    InputType, dense_vector, sparse_binary_vector,\
-    sparse_vector, integer_value
+import paddle.trainer.PyDataProvider2 as pydp2
 
-__all__ = [
-    'InputType', 'dense_vector', 'sparse_binary_vector', 'sparse_vector',
-    'integer_value'
+import_list = [
+    nm for nm in dir(pydp2)
+    if '_' in nm and nm[0] != '_' and ('value' in nm or 'vector' in nm)
 ]
+import_list.extend(['InputType'])
+
+for nm in import_list:
+    globals()[nm] = getattr(pydp2, nm)
+
+__all__ = import_list
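+
+# After this loop, paddle.v2.data_type re-exports the input-type constructors
+# from PyDataProvider2, e.g.:
+#
+#   pixel = paddle.v2.data_type.dense_vector(784)
+#   label = paddle.v2.data_type.integer_value(10)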
diff --git a/python/paddle/reader/__init__.py b/python/paddle/v2/dataset/__init__.py
similarity index 64%
rename from python/paddle/reader/__init__.py
rename to python/paddle/v2/dataset/__init__.py
index 493b410e8299ebe167be43ead1401a6ab245a631..80ff6295c34e853d8f69b9e78719af23a56d1fbb 100644
--- a/python/paddle/reader/__init__.py
+++ b/python/paddle/v2/dataset/__init__.py
@@ -11,13 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+Dataset package.
+"""
 
-# It would be too lengthy to require our users to prefix decorators with `decorator`.
-# For example, we want the following line
-#
-#     r = paddle.reader.decorator.bufferd(paddle.reader.creator.text("hello.txt"))
-#
-# to be a shorter version:
-#
-#     r = paddle.reader.buffered(paddle.reader.creator.text("hello.txt"))
-from decorator import *
+import mnist
+import imikolov
+import imdb
+import cifar
+import movielens
+import conll05
+import uci_housing
+import sentiment
+import wmt14
+
+__all__ = [
+    'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment',
+    'uci_housing', 'wmt14'
+]
diff --git a/python/paddle/v2/dataset/cifar.py b/python/paddle/v2/dataset/cifar.py
new file mode 100644
index 0000000000000000000000000000000000000000..41fda1e8f24cdef13d8ab3645862814100a1cd4c
--- /dev/null
+++ b/python/paddle/v2/dataset/cifar.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+CIFAR dataset.
+
+This module will download dataset from
+https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into
+paddle reader creators.
+
+The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
+with 6000 images per class. There are 50000 training images and 10000 test
+images.
+
+The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes
+containing 600 images each. There are 500 training images and 100 testing
+images per class.
+
+"""
+
+import cPickle
+import itertools
+import numpy
+from common import download
+import tarfile
+
+__all__ = ['train100', 'test100', 'train10', 'test10']
+
+URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/'
+CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz'
+CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a'
+CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz'
+CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'
+
+
+def reader_creator(filename, sub_name):
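+    """
+    Create a reader that yields (image, label) samples from every batch
+    file in the CIFAR tarball whose member name contains sub_name.
+    """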
+    def read_batch(batch):
+        data = batch['data']
+        labels = batch.get('labels', batch.get('fine_labels', None))
+        assert labels is not None
+        for sample, label in itertools.izip(data, labels):
+            yield (sample / 255.0).astype(numpy.float32), int(label)
+
+    def reader():
+        with tarfile.open(filename, mode='r') as f:
+            names = (each_item.name for each_item in f
+                     if sub_name in each_item.name)
+
+            for name in names:
+                batch = cPickle.load(f.extractfile(name))
+                for item in read_batch(batch):
+                    yield item
+
+    return reader
+
+
+def train100():
+    """
+    CIFAR-100 training set creator.
+
+    It returns a reader creator, each sample in the reader is image pixels in
+    [0, 1] and label in [0, 99].
+
+    :return: Training reader creator
+    :rtype: callable
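+
+    A usage sketch (the returned creator builds a generator of samples):
+
+    ..  code-block:: python
+
+        reader = paddle.v2.dataset.cifar.train100()
+        image, label = next(reader())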
+    """
+    return reader_creator(
+        download(CIFAR100_URL, 'cifar', CIFAR100_MD5), 'train')
+
+
+def test100():
+    """
+    CIFAR-100 test set creator.
+
+    It returns a reader creator, each sample in the reader is image pixels in
+    [0, 1] and label in [0, 99].
+
+    :return: Test reader creator.
+    :rtype: callable
+    """
+    return reader_creator(download(CIFAR100_URL, 'cifar', CIFAR100_MD5), 'test')
+
+
+def train10():
+    """
+    CIFAR-10 training set creator.
+
+    It returns a reader creator, each sample in the reader is image pixels in
+    [0, 1] and label in [0, 9].
+
+    :return: Training reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'data_batch')
+
+
+def test10():
+    """
+    CIFAR-10 test set creator.
+
+    It returns a reader creator, each sample in the reader is image pixels in
+    [0, 1] and label in [0, 9].
+
+    :return: Test reader creator.
+    :rtype: callable
+    """
+    return reader_creator(
+        download(CIFAR10_URL, 'cifar', CIFAR10_MD5), 'test_batch')
+
+
+def fetch():
+    download(CIFAR10_URL, 'cifar', CIFAR10_MD5)
+    download(CIFAR100_URL, 'cifar', CIFAR100_MD5)
diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eb018b8d60e9a8bd0091836ab56c35b05786fca
--- /dev/null
+++ b/python/paddle/v2/dataset/common.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import hashlib
+import os
+import shutil
+import sys
+import importlib
+import paddle.v2.dataset
+
+__all__ = ['DATA_HOME', 'download', 'md5file']
+
+DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
+
+if not os.path.exists(DATA_HOME):
+    os.makedirs(DATA_HOME)
+
+
+def md5file(fname):
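+    """
+    Compute the MD5 hex digest of fname, reading the file in 4KB chunks so
+    that large files need not fit into memory.
+    """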
+    hash_md5 = hashlib.md5()
+    f = open(fname, "rb")
+    for chunk in iter(lambda: f.read(4096), b""):
+        hash_md5.update(chunk)
+    f.close()
+    return hash_md5.hexdigest()
+
+
+def download(url, module_name, md5sum):
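+    """
+    Download url into DATA_HOME/module_name and return the local file path,
+    skipping the download when a cached copy with a matching MD5 exists.
+    """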
+    dirname = os.path.join(DATA_HOME, module_name)
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+
+    filename = os.path.join(dirname, url.split('/')[-1])
+    if not (os.path.exists(filename) and md5file(filename) == md5sum):
+        print "Cache file %s not found, downloading %s" % (filename, url)
+        r = requests.get(url, stream=True)
+        total_length = r.headers.get('content-length')
+
+        if total_length is None:
+            with open(filename, 'wb') as f:
+                shutil.copyfileobj(r.raw, f)
+        else:
+            with open(filename, 'wb') as f:
+                dl = 0
+                total_length = int(total_length)
+                for data in r.iter_content(chunk_size=4096):
+                    dl += len(data)
+                    f.write(data)
+                    done = int(50 * dl / total_length)
+                    sys.stdout.write("\r[%s%s]" % ('=' * done,
+                                                   ' ' * (50 - done)))
+                    sys.stdout.flush()
+
+    return filename
+
+
+def fetch_all():
+    for module_name in filter(lambda x: not x.startswith("__"),
+                              dir(paddle.v2.dataset)):
+        if "fetch" in dir(
+                importlib.import_module("paddle.v2.dataset.%s" % module_name)):
+            getattr(
+                importlib.import_module("paddle.v2.dataset.%s" % module_name),
+                "fetch")()
diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py
new file mode 100644
index 0000000000000000000000000000000000000000..12d648bf6557ed6e437320e56a80294abac29f18
--- /dev/null
+++ b/python/paddle/v2/dataset/conll05.py
@@ -0,0 +1,223 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Conll05 dataset.
+Paddle's semantic role labeling Book and demo use this dataset as an example.
+Because the full Conll05 dataset is not freely available, the default download
+URL points to the Conll05 test set (which is public). Users can replace the
+URL and MD5 with those of their own Conll dataset. A pre-trained word vector
+model based on the Wikipedia corpus is used to initialize the SRL model.
+"""
+
+import tarfile
+import gzip
+import itertools
+from common import download
+
+__all__ = ['test', 'get_dict', 'get_embedding']
+
+DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz'
+DATA_MD5 = '387719152ae52d60422c016e92a742fc'
+WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt'
+WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa'
+VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt'
+VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c'
+TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt'
+TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751'
+EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb'
+EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7'
+
+UNK_IDX = 0
+
+
+def load_dict(filename):
+    d = dict()
+    with open(filename, 'r') as f:
+        for i, line in enumerate(f):
+            d[line.strip()] = i
+    return d
+
+
+def corpus_reader(data_path, words_name, props_name):
+    """
+    Read one corpus. It returns an iterator in which each element is a
+    tuple of a sentence and its labels. The sentence is a list of word IDs;
+    the labels are a list of label IDs.
+    :return: an iterator over the data.
+    :rtype: iterator
+    """
+
+    def reader():
+        tf = tarfile.open(data_path)
+        wf = tf.extractfile(words_name)
+        pf = tf.extractfile(props_name)
+        with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile(
+                fileobj=pf) as props_file:
+            sentences = []
+            labels = []
+            one_seg = []
+            for word, label in itertools.izip(words_file, props_file):
+                word = word.strip()
+                label = label.strip().split()
+
+                if len(label) == 0:  # end of sentence
+                    for i in xrange(len(one_seg[0])):
+                        a_kind_label = [x[i] for x in one_seg]
+                        labels.append(a_kind_label)
+
+                    if len(labels) >= 1:
+                        verb_list = []
+                        for x in labels[0]:
+                            if x != '-':
+                                verb_list.append(x)
+
+                        for i, lbl in enumerate(labels[1:]):
+                            cur_tag = 'O'
+                            is_in_bracket = False
+                            lbl_seq = []
+                            verb_word = ''
+                            for l in lbl:
+                                if l == '*' and not is_in_bracket:
+                                    lbl_seq.append('O')
+                                elif l == '*' and is_in_bracket:
+                                    lbl_seq.append('I-' + cur_tag)
+                                elif l == '*)':
+                                    lbl_seq.append('I-' + cur_tag)
+                                    is_in_bracket = False
+                                elif l.find('(') != -1 and l.find(')') != -1:
+                                    cur_tag = l[1:l.find('*')]
+                                    lbl_seq.append('B-' + cur_tag)
+                                    is_in_bracket = False
+                                elif l.find('(') != -1 and l.find(')') == -1:
+                                    cur_tag = l[1:l.find('*')]
+                                    lbl_seq.append('B-' + cur_tag)
+                                    is_in_bracket = True
+                                else:
+                                    raise RuntimeError('Unexpected label: %s' %
+                                                       l)
+
+                            yield sentences, verb_list[i], lbl_seq
+
+                    sentences = []
+                    labels = []
+                    one_seg = []
+                else:
+                    sentences.append(word)
+                    one_seg.append(label)
+
+        pf.close()
+        wf.close()
+        tf.close()
+
+    return reader
+
+
+def reader_creator(corpus_reader,
+                   word_dict=None,
+                   predicate_dict=None,
+                   label_dict=None):
+    def reader():
+        for sentence, predicate, labels in corpus_reader():
+
+            sen_len = len(sentence)
+
+            verb_index = labels.index('B-V')
+            mark = [0] * len(labels)
+            if verb_index > 0:
+                mark[verb_index - 1] = 1
+                ctx_n1 = sentence[verb_index - 1]
+            else:
+                ctx_n1 = 'bos'
+
+            if verb_index > 1:
+                mark[verb_index - 2] = 1
+                ctx_n2 = sentence[verb_index - 2]
+            else:
+                ctx_n2 = 'bos'
+
+            mark[verb_index] = 1
+            ctx_0 = sentence[verb_index]
+
+            if verb_index < len(labels) - 1:
+                mark[verb_index + 1] = 1
+                ctx_p1 = sentence[verb_index + 1]
+            else:
+                ctx_p1 = 'eos'
+
+            if verb_index < len(labels) - 2:
+                mark[verb_index + 2] = 1
+                ctx_p2 = sentence[verb_index + 2]
+            else:
+                ctx_p2 = 'eos'
+
+            word_idx = [word_dict.get(w, UNK_IDX) for w in sentence]
+
+            ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len
+            ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len
+            ctx_0_idx = [word_dict.get(ctx_0, UNK_IDX)] * sen_len
+            ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len
+            ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len
+
+            pred_idx = [predicate_dict.get(predicate)] * sen_len
+            label_idx = [label_dict.get(w) for w in labels]
+
+            yield word_idx, ctx_n2_idx, ctx_n1_idx, \
+              ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx
+
+    return reader
+
+
+def get_dict():
+    """
+    Get the word, verb and label dictionaries of the Conll05 dataset.
+    """
+    word_dict = load_dict(download(WORDDICT_URL, 'conll05st', WORDDICT_MD5))
+    verb_dict = load_dict(download(VERBDICT_URL, 'conll05st', VERBDICT_MD5))
+    label_dict = load_dict(download(TRGDICT_URL, 'conll05st', TRGDICT_MD5))
+    return word_dict, verb_dict, label_dict
+
+
+def get_embedding():
+    """
+    Get the path of the pre-trained word vectors based on the Wikipedia corpus.
+    """
+    return download(EMB_URL, 'conll05st', EMB_MD5)
+
+
+def test():
+    """
+    Conll05 test set creator.
+
+    Because the training dataset is not free, the test dataset is used for
+    training. It returns a reader creator; each sample in the reader carries
+    nine features: the sentence sequence, the predicate, the predicate
+    context, the predicate context flags and the tagged sequence.
+
+    :return: Training reader creator
+    :rtype: callable
+    """
+    word_dict, verb_dict, label_dict = get_dict()
+    reader = corpus_reader(
+        download(DATA_URL, 'conll05st', DATA_MD5),
+        words_name='conll05st-release/test.wsj/words/test.wsj.words.gz',
+        props_name='conll05st-release/test.wsj/props/test.wsj.props.gz')
+    return reader_creator(reader, word_dict, verb_dict, label_dict)
+
+
+def fetch():
+    download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)
+    download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)
+    download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)
+    download(EMB_URL, 'conll05st', EMB_MD5)
+    download(DATA_URL, 'conll05st', DATA_MD5)
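+
+
+# A minimal usage sketch (illustrative only): each sample yielded by the test
+# reader is a 9-tuple of equally long sequences plus the predicate, in the
+# order produced by reader_creator above.
+#
+#   import paddle.v2.dataset.conll05 as conll05
+#   for sample in conll05.test()():
+#       words, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, pred, mark, labels = sample
+#       break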
diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..5dc5abfe53d90ec3adc9a27a49ed086953146497
--- /dev/null
+++ b/python/paddle/v2/dataset/imdb.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+IMDB dataset.
+
+This module downloads the IMDB dataset from
+http://ai.stanford.edu/%7Eamaas/data/sentiment/. The dataset contains 25,000
+highly polar movie reviews for training and another 25,000 for testing. This
+module also provides an API for building the word dictionary.
+"""
+
+import paddle.v2.dataset.common
+import collections
+import tarfile
+import Queue
+import re
+import string
+import threading
+
+__all__ = ['build_dict', 'train', 'test']
+
+URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz'
+MD5 = '7c2ac02c03563afcf9b574c7e56c153a'
+
+
+def tokenize(pattern):
+    """
+    Read files that match the given pattern.  Tokenize and yield each file.
+    """
+
+    with tarfile.open(paddle.v2.dataset.common.download(URL, 'imdb',
+                                                        MD5)) as tarf:
+        # Note that we should use tarfile.next(), which does
+        # sequential access of member files, rather than
+        # tarfile.extractfile, which does random access and might
+        # thrash hard disks.
+        tf = tarf.next()
+        while tf is not None:
+            if bool(pattern.match(tf.name)):
+                # newline and punctuations removal and ad-hoc tokenization.
+                yield tarf.extractfile(tf).read().rstrip("\n\r").translate(
+                    None, string.punctuation).lower().split()
+            tf = tarf.next()
+
+
+def build_dict(pattern, cutoff):
+    """
+    Build a word dictionary from the corpus. Keys of the dictionary are words,
+    and values are zero-based IDs of these words.
+    """
+    word_freq = collections.defaultdict(int)
+    for doc in tokenize(pattern):
+        for word in doc:
+            word_freq[word] += 1
+
+    # Not sure if we should prune less-frequent words here.
+    word_freq = filter(lambda x: x[1] > cutoff, word_freq.items())
+
+    dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
+    words, _ = list(zip(*dictionary))
+    word_idx = dict(zip(words, xrange(len(words))))
+    word_idx['<unk>'] = len(words)
+    return word_idx
+
+
+def reader_creator(pos_pattern, neg_pattern, word_idx, buffer_size):
+    UNK = word_idx['<unk>']
+
+    qs = [Queue.Queue(maxsize=buffer_size), Queue.Queue(maxsize=buffer_size)]
+
+    def load(pattern, queue):
+        for doc in tokenize(pattern):
+            queue.put(doc)
+        queue.put(None)
+
+    def reader():
+        # Create two threads that load positive and negative samples
+        # into qs.
+        t0 = threading.Thread(
+            target=load, args=(
+                pos_pattern,
+                qs[0], ))
+        t0.daemon = True
+        t0.start()
+
+        t1 = threading.Thread(
+            target=load, args=(
+                neg_pattern,
+                qs[1], ))
+        t1.daemon = True
+        t1.start()
+
+        # Read alternately from qs[0] and qs[1].
+        i = 0
+        doc = qs[i].get()
+        while doc is not None:
+            yield [word_idx.get(w, UNK) for w in doc], i % 2
+            i += 1
+            doc = qs[i % 2].get()
+
+        # Once one queue is exhausted, read the remaining docs from the
+        # other queue.
+        i += 1
+        doc = qs[i % 2].get()
+        while doc is not None:
+            yield [word_idx.get(w, UNK) for w in doc], i % 2
+            doc = qs[i % 2].get()
+
+    return reader()
+
+
+def train(word_idx):
+    """
+    IMDB training set creator.
+
+    It returns a reader creator; each sample in the reader is a zero-based ID
+    sequence and a label in [0, 1].
+
+    :param word_idx: word dictionary
+    :type word_idx: dict
+    :return: Training reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        re.compile("aclImdb/train/pos/.*\.txt$"),
+        re.compile("aclImdb/train/neg/.*\.txt$"), word_idx, 1000)
+
+
+def test(word_idx):
+    """
+    IMDB test set creator.
+
+    It returns a reader creator; each sample in the reader is a zero-based ID
+    sequence and a label in [0, 1].
+
+    :param word_idx: word dictionary
+    :type word_idx: dict
+    :return: Test reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        re.compile("aclImdb/test/pos/.*\.txt$"),
+        re.compile("aclImdb/test/neg/.*\.txt$"), word_idx, 1000)
+
+
+def word_dict():
+    """
+    Build a word dictionary from the corpus.
+
+    :return: Word dictionary
+    :rtype: dict
+    """
+    return build_dict(
+        re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150)
+
+
+def fetch():
+    paddle.v2.dataset.common.download(URL, 'imdb', MD5)
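+
+
+# A minimal usage sketch (illustrative only): build the dictionary once, then
+# iterate training samples. Labels alternate 0 (positive) and 1 (negative)
+# because the reader interleaves the two queues.
+#
+#   import paddle.v2.dataset.imdb as imdb
+#   w = imdb.word_dict()              # expensive: scans the whole corpus
+#   for ids, label in imdb.train(w):
+#       break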
diff --git a/python/paddle/v2/dataset/imikolov.py b/python/paddle/v2/dataset/imikolov.py
new file mode 100644
index 0000000000000000000000000000000000000000..41ca27e23632bea7e410f9d91920bbc539d38279
--- /dev/null
+++ b/python/paddle/v2/dataset/imikolov.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+imikolov's simple dataset.
+
+This module downloads the dataset from
+http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parses the training set and
+test set into paddle reader creators.
+"""
+import paddle.v2.dataset.common
+import collections
+import tarfile
+
+__all__ = ['train', 'test', 'build_dict']
+
+URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
+MD5 = '30177ea32e27c525793142b6bf2c8e2d'
+
+
+def word_count(f, word_freq=None):
+    if word_freq is None:
+        word_freq = collections.defaultdict(int)
+
+    for l in f:
+        for w in l.strip().split():
+            word_freq[w] += 1
+        word_freq['<s>'] += 1
+        word_freq['<e>'] += 1
+
+    return word_freq
+
+
+def build_dict():
+    """
+    Build a word dictionary from the corpus. Keys of the dictionary are words,
+    and values are zero-based IDs of these words.
+    """
+    train_filename = './simple-examples/data/ptb.train.txt'
+    test_filename = './simple-examples/data/ptb.valid.txt'
+    with tarfile.open(
+            paddle.v2.dataset.common.download(
+                paddle.v2.dataset.imikolov.URL, 'imikolov',
+                paddle.v2.dataset.imikolov.MD5)) as tf:
+        trainf = tf.extractfile(train_filename)
+        testf = tf.extractfile(test_filename)
+        word_freq = word_count(testf, word_count(trainf))
+        if '<unk>' in word_freq:
+            # remove <unk> for now, since we will set it as last index
+            del word_freq['<unk>']
+
+        TYPO_FREQ = 50
+        word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items())
+
+        word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
+        words, _ = list(zip(*word_freq_sorted))
+        word_idx = dict(zip(words, xrange(len(words))))
+        word_idx['<unk>'] = len(words)
+
+    return word_idx
+
+
+def reader_creator(filename, word_idx, n):
+    def reader():
+        with tarfile.open(
+                paddle.v2.dataset.common.download(
+                    paddle.v2.dataset.imikolov.URL, 'imikolov',
+                    paddle.v2.dataset.imikolov.MD5)) as tf:
+            f = tf.extractfile(filename)
+
+            UNK = word_idx['<unk>']
+            for l in f:
+                l = ['<s>'] + l.strip().split() + ['<e>']
+                if len(l) >= n:
+                    l = [word_idx.get(w, UNK) for w in l]
+                    for i in range(n, len(l) + 1):
+                        yield tuple(l[i - n:i])
+
+    return reader
+
+
+def train(word_idx, n):
+    """
+    imikolov training set creator.
+
+    It returns a reader creator; each sample in the reader is a word ID
+    tuple.
+
+    :param word_idx: word dictionary
+    :type word_idx: dict
+    :param n: sliding window size
+    :type n: int
+    :return: Training reader creator
+    :rtype: callable
+    """
+    return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n)
+
+
+def test(word_idx, n):
+    """
+    imikolov test set creator.
+
+    It returns a reader creator; each sample in the reader is a word ID
+    tuple.
+
+    :param word_idx: word dictionary
+    :type word_idx: dict
+    :param n: sliding window size
+    :type n: int
+    :return: Test reader creator
+    :rtype: callable
+    """
+    return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n)
+
+
+def fetch():
+    paddle.v2.dataset.common.download(URL, "imikolov", MD5)
diff --git a/python/paddle/v2/dataset/mnist.py b/python/paddle/v2/dataset/mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1347d3c66da858104858bfb6739d84051322146
--- /dev/null
+++ b/python/paddle/v2/dataset/mnist.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+MNIST dataset.
+
+This module downloads the dataset from http://yann.lecun.com/exdb/mnist/ and
+parses the training set and test set into paddle reader creators.
+"""
+import paddle.v2.dataset.common
+import subprocess
+import numpy
+import platform
+__all__ = ['train', 'test']
+
+URL_PREFIX = 'http://yann.lecun.com/exdb/mnist/'
+TEST_IMAGE_URL = URL_PREFIX + 't10k-images-idx3-ubyte.gz'
+TEST_IMAGE_MD5 = '9fb629c4189551a2d022fa330f9573f3'
+TEST_LABEL_URL = URL_PREFIX + 't10k-labels-idx1-ubyte.gz'
+TEST_LABEL_MD5 = 'ec29112dd5afa0611ce80d1b7f02629c'
+TRAIN_IMAGE_URL = URL_PREFIX + 'train-images-idx3-ubyte.gz'
+TRAIN_IMAGE_MD5 = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'
+TRAIN_LABEL_URL = URL_PREFIX + 'train-labels-idx1-ubyte.gz'
+TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432'
+
+
+def reader_creator(image_filename, label_filename, buffer_size):
+    def reader():
+        if platform.system() == 'Darwin':
+            zcat_cmd = 'gzcat'
+        elif platform.system() == 'Linux':
+            zcat_cmd = 'zcat'
+        else:
+            raise NotImplementedError()
+
+        # According to http://stackoverflow.com/a/38061619/724872, we
+        # cannot use standard package gzip here.
+        m = subprocess.Popen([zcat_cmd, image_filename], stdout=subprocess.PIPE)
+        m.stdout.read(16)  # skip some magic bytes
+
+        l = subprocess.Popen([zcat_cmd, label_filename], stdout=subprocess.PIPE)
+        l.stdout.read(8)  # skip some magic bytes
+
+        try:  # the reader loop may exit early; always clean up the subprocesses.
+            while True:
+                labels = numpy.fromfile(
+                    l.stdout, 'ubyte', count=buffer_size).astype("int")
+
+                if labels.size != buffer_size:
+                    break  # numpy.fromfile returns empty slice after EOF.
+
+                images = numpy.fromfile(
+                    m.stdout, 'ubyte', count=buffer_size * 28 * 28).reshape(
+                        (buffer_size, 28 * 28)).astype('float32')
+
+                images = images / 255.0 * 2.0 - 1.0
+
+                for i in xrange(buffer_size):
+                    yield images[i, :], int(labels[i])
+        finally:
+            m.terminate()
+            l.terminate()
+
+    return reader
+
+
+def train():
+    """
+    MNIST training set creator.
+
+    It returns a reader creator; each sample in the reader is image pixels in
+    [-1, 1] and a label in [0, 9].
+
+    :return: Training reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        paddle.v2.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
+                                          TRAIN_IMAGE_MD5),
+        paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
+                                          TRAIN_LABEL_MD5), 100)
+
+
+def test():
+    """
+    MNIST test set creator.
+
+    It returns a reader creator; each sample in the reader is image pixels in
+    [-1, 1] and a label in [0, 9].
+
+    :return: Test reader creator.
+    :rtype: callable
+    """
+    return reader_creator(
+        paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist',
+                                          TEST_IMAGE_MD5),
+        paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist',
+                                          TEST_LABEL_MD5), 100)
+
+
+def fetch():
+    paddle.v2.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5)
+    paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
+    paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5)
+    paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5)
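+
+
+# A minimal usage sketch (illustrative only): each sample is a 784-dimensional
+# float vector scaled to [-1, 1] by reader_creator above, plus an integer
+# label in [0, 9].
+#
+#   import paddle.v2.dataset.mnist as mnist
+#   for image, label in mnist.train()():
+#       assert image.shape == (784, ) and 0 <= label <= 9
+#       break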
diff --git a/python/paddle/v2/dataset/movielens.py b/python/paddle/v2/dataset/movielens.py
new file mode 100644
index 0000000000000000000000000000000000000000..837a85912663826f0483aff4f6a38f3945375d82
--- /dev/null
+++ b/python/paddle/v2/dataset/movielens.py
@@ -0,0 +1,253 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Movielens 1-M dataset.
+
+Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000
+movies, collected by GroupLens Research. This module downloads the
+Movielens 1-M dataset from
+http://files.grouplens.org/datasets/movielens/ml-1m.zip and parses the
+training set and test set into paddle reader creators.
+
+"""
+
+import zipfile
+from common import download
+import re
+import random
+import functools
+
+__all__ = [
+    'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',
+    'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info'
+]
+
+age_table = [1, 18, 25, 35, 45, 50, 56]
+
+URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
+MD5 = 'c4d9eecfca2ab87c1945afe126590906'
+
+
+class MovieInfo(object):
+    """
+    Movie id, title and categories information are stored in MovieInfo.
+    """
+
+    def __init__(self, index, categories, title):
+        self.index = int(index)
+        self.categories = categories
+        self.title = title
+
+    def value(self):
+        """
+        Get information from a movie.
+        """
+        return [
+            self.index, [CATEGORIES_DICT[c] for c in self.categories],
+            [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]
+        ]
+
+    def __str__(self):
+        return "" % (
+            self.index, self.title, self.categories)
+
+    def __repr__(self):
+        return self.__str__()
+
+
+class UserInfo(object):
+    """
+    User id, gender, age, and job information are stored in UserInfo.
+    """
+
+    def __init__(self, index, gender, age, job_id):
+        self.index = int(index)
+        self.is_male = gender == 'M'
+        self.age = age_table.index(int(age))
+        self.job_id = int(job_id)
+
+    def value(self):
+        """
+        Get information from a user.
+        """
+        return [self.index, 0 if self.is_male else 1, self.age, self.job_id]
+
+    def __str__(self):
+        return "" % (
+            self.index, "M"
+            if self.is_male else "F", age_table[self.age], self.job_id)
+
+    def __repr__(self):
+        return str(self)
+
+
+MOVIE_INFO = None
+MOVIE_TITLE_DICT = None
+CATEGORIES_DICT = None
+USER_INFO = None
+
+
+def __initialize_meta_info__():
+    fn = download(URL, "movielens", MD5)
+    global MOVIE_INFO
+    if MOVIE_INFO is None:
+        pattern = re.compile(r'^(.*)\((\d+)\)$')
+        with zipfile.ZipFile(file=fn) as package:
+            for info in package.infolist():
+                assert isinstance(info, zipfile.ZipInfo)
+                MOVIE_INFO = dict()
+                title_word_set = set()
+                categories_set = set()
+                with package.open('ml-1m/movies.dat') as movie_file:
+                    for i, line in enumerate(movie_file):
+                        movie_id, title, categories = line.strip().split('::')
+                        categories = categories.split('|')
+                        for c in categories:
+                            categories_set.add(c)
+                        title = pattern.match(title).group(1)
+                        MOVIE_INFO[int(movie_id)] = MovieInfo(
+                            index=movie_id, categories=categories, title=title)
+                        for w in title.split():
+                            title_word_set.add(w.lower())
+
+                global MOVIE_TITLE_DICT
+                MOVIE_TITLE_DICT = dict()
+                for i, w in enumerate(title_word_set):
+                    MOVIE_TITLE_DICT[w] = i
+
+                global CATEGORIES_DICT
+                CATEGORIES_DICT = dict()
+                for i, c in enumerate(categories_set):
+                    CATEGORIES_DICT[c] = i
+
+                global USER_INFO
+                USER_INFO = dict()
+                with package.open('ml-1m/users.dat') as user_file:
+                    for line in user_file:
+                        uid, gender, age, job, _ = line.strip().split("::")
+                        USER_INFO[int(uid)] = UserInfo(
+                            index=uid, gender=gender, age=age, job_id=job)
+    return fn
+
+
+def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
+    fn = __initialize_meta_info__()
+    rand = random.Random(x=rand_seed)
+    with zipfile.ZipFile(file=fn) as package:
+        with package.open('ml-1m/ratings.dat') as rating:
+            for line in rating:
+                if (rand.random() < test_ratio) == is_test:
+                    uid, mov_id, rating, _ = line.strip().split("::")
+                    uid = int(uid)
+                    mov_id = int(mov_id)
+                    rating = float(rating) * 2 - 5.0
+
+                    mov = MOVIE_INFO[mov_id]
+                    usr = USER_INFO[uid]
+                    yield usr.value() + mov.value() + [[rating]]
+
+
+def __reader_creator__(**kwargs):
+    return lambda: __reader__(**kwargs)
+
+
+train = functools.partial(__reader_creator__, is_test=False)
+test = functools.partial(__reader_creator__, is_test=True)
+
+
+def get_movie_title_dict():
+    """
+    Get movie title dictionary.
+    """
+    __initialize_meta_info__()
+    return MOVIE_TITLE_DICT
+
+
+def __max_index_info__(a, b):
+    if a.index > b.index:
+        return a
+    else:
+        return b
+
+
+def max_movie_id():
+    """
+    Get the maximum value of movie id.
+    """
+    __initialize_meta_info__()
+    return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index
+
+
+def max_user_id():
+    """
+    Get the maximum value of user id.
+    """
+    __initialize_meta_info__()
+    return reduce(__max_index_info__, USER_INFO.viewvalues()).index
+
+
+def __max_job_id_impl__(a, b):
+    if a.job_id > b.job_id:
+        return a
+    else:
+        return b
+
+
+def max_job_id():
+    """
+    Get the maximum value of job id.
+    """
+    __initialize_meta_info__()
+    return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id
+
+
+def movie_categories():
+    """
+    Get movie categories dictionary.
+    """
+    __initialize_meta_info__()
+    return CATEGORIES_DICT
+
+
+def user_info():
+    """
+    Get user info dictionary.
+    """
+    __initialize_meta_info__()
+    return USER_INFO
+
+
+def movie_info():
+    """
+    Get movie info dictionary.
+    """
+    __initialize_meta_info__()
+    return MOVIE_INFO
+
+
+def unittest():
+    for train_count, _ in enumerate(train()()):
+        pass
+    for test_count, _ in enumerate(test()()):
+        pass
+
+    print train_count, test_count
+
+
+def fetch():
+    download(URL, "movielens", MD5)
+
+
+if __name__ == '__main__':
+    unittest()
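+
+
+# A minimal usage sketch (illustrative only): each sample concatenates the
+# user features, the movie features and the scaled rating, in the order
+# produced by __reader__ above.
+#
+#   import paddle.v2.dataset.movielens as ml
+#   sample = next(ml.train()())
+#   # -> [user_id, gender, age_bucket, job_id,
+#   #     movie_id, category_ids, title_word_ids, [rating]]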
diff --git a/python/paddle/v2/dataset/sentiment.py b/python/paddle/v2/dataset/sentiment.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dd34e7383fe2a290fcf61474914183a383e2b9c
--- /dev/null
+++ b/python/paddle/v2/dataset/sentiment.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This script fetches and preprocesses the movie_reviews data set provided by NLTK.
+
+TODO(yuyang18): Complete dataset.
+"""
+
+import collections
+from itertools import chain
+
+import nltk
+from nltk.corpus import movie_reviews
+
+import common
+
+__all__ = ['train', 'test', 'get_word_dict']
+NUM_TRAINING_INSTANCES = 1600
+NUM_TOTAL_INSTANCES = 2000
+
+
+def download_data_if_not_yet():
+    """
+    Download the data set, if it has not been downloaded yet.
+    """
+    try:
+        # make sure that nltk can find the data
+        if common.DATA_HOME not in nltk.data.path:
+            nltk.data.path.append(common.DATA_HOME)
+        movie_reviews.categories()
+    except LookupError:
+        print "Downloading movie_reviews data set, please wait....."
+        nltk.download('movie_reviews', download_dir=common.DATA_HOME)
+        print "Download data set success....."
+        print "Path is " + nltk.data.find('corpora/movie_reviews').path
+
+
+def get_word_dict():
+    """
+    Sort the words by their frequency of occurrence in the samples.
+    :return:
+        words_freq_sorted
+    """
+    words_freq_sorted = list()
+    word_freq_dict = collections.defaultdict(int)
+    download_data_if_not_yet()
+
+    for category in movie_reviews.categories():
+        for field in movie_reviews.fileids(category):
+            for words in movie_reviews.words(field):
+                word_freq_dict[words] += 1
+    words_sort_list = word_freq_dict.items()
+    words_sort_list.sort(cmp=lambda a, b: b[1] - a[1])
+    for index, word in enumerate(words_sort_list):
+        words_freq_sorted.append((word[0], index))
+    return words_freq_sorted
+
+
+def sort_files():
+    """
+    Sort the files so that negative and positive samples alternate for
+    interleaved reading.
+    :return:
+        files_list
+    """
+    neg_file_list = movie_reviews.fileids('neg')
+    pos_file_list = movie_reviews.fileids('pos')
+    files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list)))
+    return files_list
+
+
+def load_sentiment_data():
+    """
+    Load the data set
+    :return:
+        data_set
+    """
+    data_set = list()
+    download_data_if_not_yet()
+    words_ids = dict(get_word_dict())
+    for sample_file in sort_files():
+        words_list = list()
+        category = 0 if 'neg' in sample_file else 1
+        for word in movie_reviews.words(sample_file):
+            words_list.append(words_ids[word.lower()])
+        data_set.append((words_list, category))
+    return data_set
+
+
+def reader_creator(data):
+    """
+    Reader creator: generate an iterator over the given data set.
+    :param data:
+        train data set or test data set
+    """
+    for each in data:
+        yield each[0], each[1]
+
+
+def train():
+    """
+    Default training set reader creator
+    """
+    data_set = load_sentiment_data()
+    return reader_creator(data_set[0:NUM_TRAINING_INSTANCES])
+
+
+def test():
+    """
+    Default test set reader creator
+    """
+    data_set = load_sentiment_data()
+    return reader_creator(data_set[NUM_TRAINING_INSTANCES:])
+
+
+def fetch():
+    nltk.download('movie_reviews', download_dir=common.DATA_HOME)
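+
+
+# A minimal usage sketch (illustrative only): note that, unlike the other
+# dataset modules, train() and test() here return generators directly rather
+# than reader creators.
+#
+#   import paddle.v2.dataset.sentiment as st
+#   for word_ids, polarity in st.train():
+#       break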
diff --git a/python/paddle/v2/dataset/tests/cifar_test.py b/python/paddle/v2/dataset/tests/cifar_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0e18229da7818be5752ee592e094a00da286ad9
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/cifar_test.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2.dataset.cifar
+import unittest
+
+
+class TestCIFAR(unittest.TestCase):
+    def check_reader(self, reader):
+        sum = 0
+        label = 0
+        for l in reader():
+            self.assertEqual(l[0].size, 3072)
+            if l[1] > label:
+                label = l[1]
+            sum += 1
+        return sum, label
+
+    def test_test10(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.cifar.test10())
+        self.assertEqual(instances, 10000)
+        self.assertEqual(max_label_value, 9)
+
+    def test_train10(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.cifar.train10())
+        self.assertEqual(instances, 50000)
+        self.assertEqual(max_label_value, 9)
+
+    def test_test100(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.cifar.test100())
+        self.assertEqual(instances, 10000)
+        self.assertEqual(max_label_value, 99)
+
+    def test_train100(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.cifar.train100())
+        self.assertEqual(instances, 50000)
+        self.assertEqual(max_label_value, 99)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/tests/common_test.py b/python/paddle/v2/dataset/tests/common_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5babcef0eb4345d243904877d323c37d4889a643
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/common_test.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2.dataset.common
+import unittest
+import tempfile
+
+
+class TestCommon(unittest.TestCase):
+    def test_md5file(self):
+        _, temp_path = tempfile.mkstemp()
+        with open(temp_path, 'w') as f:
+            f.write("Hello\n")
+        self.assertEqual('09f7e02f1290be211da707a266f153b3',
+                         paddle.v2.dataset.common.md5file(temp_path))
+
+    def test_download(self):
+        yi_avatar = 'https://avatars0.githubusercontent.com/u/1548775?v=3&s=460'
+        self.assertEqual(
+            paddle.v2.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
+            paddle.v2.dataset.common.download(
+                yi_avatar, 'test', 'f75287202d6622414c706c36c16f8e0d'))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/tests/imdb_test.py b/python/paddle/v2/dataset/tests/imdb_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d82f26895d77d05c6e936bd636b1239e1a0cd8
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/imdb_test.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2.dataset.imdb
+import unittest
+import re
+
+TRAIN_POS_PATTERN = re.compile("aclImdb/train/pos/.*\.txt$")
+TRAIN_NEG_PATTERN = re.compile("aclImdb/train/neg/.*\.txt$")
+TRAIN_PATTERN = re.compile("aclImdb/train/.*\.txt$")
+
+TEST_POS_PATTERN = re.compile("aclImdb/test/pos/.*\.txt$")
+TEST_NEG_PATTERN = re.compile("aclImdb/test/neg/.*\.txt$")
+TEST_PATTERN = re.compile("aclImdb/test/.*\.txt$")
+
+
+class TestIMDB(unittest.TestCase):
+    word_idx = None
+
+    def test_build_dict(self):
+        if self.word_idx is None:
+            self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
+                                                              150)
+
+        self.assertEqual(len(self.word_idx), 7036)
+
+    def check_dataset(self, dataset, expected_size):
+        if self.word_idx is None:
+            self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
+                                                              150)
+
+        sum = 0
+        for l in dataset(self.word_idx):
+            self.assertEqual(l[1], sum % 2)
+            sum += 1
+        self.assertEqual(sum, expected_size)
+
+    def test_train(self):
+        self.check_dataset(paddle.v2.dataset.imdb.train, 25000)
+
+    def test_test(self):
+        self.check_dataset(paddle.v2.dataset.imdb.test, 25000)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/tests/imikolov_test.py b/python/paddle/v2/dataset/tests/imikolov_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..009e55243a594e5e235c36fb0223ec70754d17f3
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/imikolov_test.py
@@ -0,0 +1,26 @@
+import paddle.v2.dataset.imikolov
+import unittest
+
+WORD_DICT = paddle.v2.dataset.imikolov.build_dict()
+
+
+class TestMikolov(unittest.TestCase):
+    def check_reader(self, reader, n):
+        for l in reader():
+            self.assertEqual(len(l), n)
+
+    def test_train(self):
+        n = 5
+        self.check_reader(paddle.v2.dataset.imikolov.train(WORD_DICT, n), n)
+
+    def test_test(self):
+        n = 5
+        self.check_reader(paddle.v2.dataset.imikolov.test(WORD_DICT, n), n)
+
+    def test_total(self):
+        _, idx = zip(*WORD_DICT.items())
+        self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/tests/mnist_test.py b/python/paddle/v2/dataset/tests/mnist_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d344cac3e7483a351033570fbec75a4d19f4a55
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/mnist_test.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2.dataset.mnist
+import unittest
+
+
+class TestMNIST(unittest.TestCase):
+    def check_reader(self, reader):
+        sum = 0
+        label = 0
+        for l in reader():
+            self.assertEqual(l[0].size, 784)
+            if l[1] > label:
+                label = l[1]
+            sum += 1
+        return sum, label
+
+    def test_train(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.mnist.train())
+        self.assertEqual(instances, 60000)
+        self.assertEqual(max_label_value, 9)
+
+    def test_test(self):
+        instances, max_label_value = self.check_reader(
+            paddle.v2.dataset.mnist.test())
+        self.assertEqual(instances, 10000)
+        self.assertEqual(max_label_value, 9)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/tests/test_sentiment.py b/python/paddle/v2/dataset/tests/test_sentiment.py
new file mode 100644
index 0000000000000000000000000000000000000000..407405290734609059c1767600748d530e8a13a6
--- /dev/null
+++ b/python/paddle/v2/dataset/tests/test_sentiment.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import nltk
+import paddle.v2.dataset.sentiment as st
+from nltk.corpus import movie_reviews
+
+
+class TestSentimentMethods(unittest.TestCase):
+    def test_get_word_dict(self):
+        word_dict = st.get_word_dict()[0:10]
+        test_word_list = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3),
+                          (u'and', 4), (u'of', 5), (u'to', 6), (u"'", 7),
+                          (u'is', 8), (u'in', 9)]
+        for idx, each in enumerate(word_dict):
+            self.assertEqual(each, test_word_list[idx])
+        self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path)
+
+    def test_sort_files(self):
+        last_label = ''
+        for sample_file in st.sort_files():
+            current_label = sample_file.split("/")[0]
+            self.assertNotEqual(current_label, last_label)
+            last_label = current_label
+
+    def test_data_set(self):
+        data_set = st.load_sentiment_data()
+        last_label = -1
+        for each in st.test():
+            self.assertNotEqual(each[1], last_label)
+            last_label = each[1]
+        self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES)
+        self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES)
+        self.assertEqual(
+            len(list(st.test())),
+            (st.NUM_TOTAL_INSTANCES - st.NUM_TRAINING_INSTANCES))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/dataset/uci_housing.py b/python/paddle/v2/dataset/uci_housing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3469fd9ce12dd4d934004f90286979b73048a5c8
--- /dev/null
+++ b/python/paddle/v2/dataset/uci_housing.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+UCI Housing dataset.
+
+This module downloads the dataset from
+https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
+parses the training set and test set into paddle reader creators.
+"""
+
+import numpy as np
+import os
+from common import download
+
+__all__ = ['train', 'test']
+
+URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
+MD5 = 'd4accdce7a25600298819f8e28e8d593'
+feature_names = [
+    'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
+    'PTRATIO', 'B', 'LSTAT'
+]
+
+UCI_TRAIN_DATA = None
+UCI_TEST_DATA = None
+
+
+def feature_range(maximums, minimums):
+    import matplotlib
+    matplotlib.use('Agg')
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    feature_num = len(maximums)
+    ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
+    ax.set_title('feature scale')
+    plt.xticks(range(feature_num), feature_names)
+    plt.xlim([-1, feature_num])
+    fig.set_figheight(6)
+    fig.set_figwidth(10)
+    if not os.path.exists('./image'):
+        os.makedirs('./image')
+    fig.savefig('image/ranges.png', dpi=48)
+    plt.close(fig)
+
+
+def load_data(filename, feature_num=14, ratio=0.8):
+    global UCI_TRAIN_DATA, UCI_TEST_DATA
+    if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
+        return
+
+    data = np.fromfile(filename, sep=' ')
+    data = data.reshape(data.shape[0] / feature_num, feature_num)
+    maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
+        axis=0) / data.shape[0]
+    feature_range(maximums[:-1], minimums[:-1])
+    for i in xrange(feature_num - 1):
+        data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
+    offset = int(data.shape[0] * ratio)
+    UCI_TRAIN_DATA = data[:offset]
+    UCI_TEST_DATA = data[offset:]
+
+
+def train():
+    """
+    UCI_HOUSING training set creator.
+
+    It returns a reader creator; each sample in the reader is the feature
+    vector after normalization and the price.
+
+    :return: Training reader creator
+    :rtype: callable
+    """
+    global UCI_TRAIN_DATA
+    load_data(download(URL, 'uci_housing', MD5))
+
+    def reader():
+        for d in UCI_TRAIN_DATA:
+            yield d[:-1], d[-1:]
+
+    return reader
+
+
+def test():
+    """
+    UCI_HOUSING test set creator.
+
+    It returns a reader creator; each sample in the reader is the feature
+    vector after normalization and the price.
+
+    :return: Test reader creator
+    :rtype: callable
+    """
+    global UCI_TEST_DATA
+    load_data(download(URL, 'uci_housing', MD5))
+
+    def reader():
+        for d in UCI_TEST_DATA:
+            yield d[:-1], d[-1:]
+
+    return reader
+
+
+def fetch():
+    download(URL, 'uci_housing', MD5)
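+
+
+# A minimal usage sketch (illustrative only): each sample is a 13-feature
+# normalized vector and a one-element price array.
+#
+#   import paddle.v2.dataset.uci_housing as uci_housing
+#   for features, price in uci_housing.train()():
+#       assert len(features) == 13 and len(price) == 1
+#       break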
diff --git a/python/paddle/v2/dataset/wmt14.py b/python/paddle/v2/dataset/wmt14.py
new file mode 100644
index 0000000000000000000000000000000000000000..0902f87741c342b237439081703081b467dc6f35
--- /dev/null
+++ b/python/paddle/v2/dataset/wmt14.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+WMT14 dataset.
+The original WMT14 dataset is too large, so a shrunken subset is provided
+instead. This module downloads the dataset from
+http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and
+parse training set and test set into paddle reader creators.
+
+"""
+import tarfile
+import gzip
+
+from paddle.v2.dataset.common import download
+from paddle.v2.parameters import Parameters
+
+__all__ = ['train', 'test', 'build_dict']
+
+URL_DEV_TEST = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz'
+MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5'
+# This is a small data set for testing. The original data is too large and will be added later.
+URL_TRAIN = 'http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz'
+MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c'
+# This is the pre-trained model, whose BLEU score is 26.92.
+URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz'
+MD5_MODEL = '4ce14a26607fb8a1cc23bcdedb1895e4'
+
+START = ""
+END = ""
+UNK = ""
+UNK_IDX = 2
+
+
+def __read_to_dict__(tar_file, dict_size):
+    def __to_dict__(fd, size):
+        out_dict = dict()
+        for line_count, line in enumerate(fd):
+            if line_count < size:
+                out_dict[line.strip()] = line_count
+            else:
+                break
+        return out_dict
+
+    with tarfile.open(tar_file, mode='r') as f:
+        names = [
+            each_item.name for each_item in f
+            if each_item.name.endswith("src.dict")
+        ]
+        assert len(names) == 1
+        src_dict = __to_dict__(f.extractfile(names[0]), dict_size)
+        names = [
+            each_item.name for each_item in f
+            if each_item.name.endswith("trg.dict")
+        ]
+        assert len(names) == 1
+        trg_dict = __to_dict__(f.extractfile(names[0]), dict_size)
+        return src_dict, trg_dict
+
+
+def reader_creator(tar_file, file_name, dict_size):
+    def reader():
+        src_dict, trg_dict = __read_to_dict__(tar_file, dict_size)
+        with tarfile.open(tar_file, mode='r') as f:
+            names = [
+                each_item.name for each_item in f
+                if each_item.name.endswith(file_name)
+            ]
+            for name in names:
+                for line in f.extractfile(name):
+                    line_split = line.strip().split('\t')
+                    if len(line_split) != 2:
+                        continue
+                    src_seq = line_split[0]  # one source sequence
+                    src_words = src_seq.split()
+                    src_ids = [
+                        src_dict.get(w, UNK_IDX)
+                        for w in [START] + src_words + [END]
+                    ]
+
+                    trg_seq = line_split[1]  # one target sequence
+                    trg_words = trg_seq.split()
+                    trg_ids = [trg_dict.get(w, UNK_IDX) for w in trg_words]
+
+                    # remove sequence whose length > 80 in training mode
+                    if len(src_ids) > 80 or len(trg_ids) > 80:
+                        continue
+                    trg_ids_next = trg_ids + [trg_dict[END]]
+                    trg_ids = [trg_dict[START]] + trg_ids
+
+                    yield src_ids, trg_ids, trg_ids_next
+
+    return reader
+
+
+def train(dict_size):
+    """
+    WMT14 training set creator.
+
+    It returns a reader creator; each sample in the reader is a source
+    language word ID sequence, a target language word ID sequence and a
+    next-word ID sequence.
+
+    :return: Training reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'train/train', dict_size)
+
+
+def test(dict_size):
+    """
+    WMT14 test set creator.
+
+    It returns a reader creator; each sample in the reader is a source
+    language word ID sequence, a target language word ID sequence and a
+    next-word ID sequence.
+
+    :return: Test reader creator
+    :rtype: callable
+    """
+    return reader_creator(
+        download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'test/test', dict_size)
+
+
+def gen(dict_size):
+    return reader_creator(
+        download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'gen/gen', dict_size)
+
+
+def model():
+    tar_file = download(URL_MODEL, 'wmt14', MD5_MODEL)
+    with gzip.open(tar_file, 'r') as f:
+        parameters = Parameters.from_tar(f)
+    return parameters
+
+
+def get_dict(dict_size, reverse=True):
+    # If reverse is False, return dicts that map each word to its ID;
+    # if reverse is True (the default), return dicts that map each ID
+    # back to its word.
+    tar_file = download(URL_TRAIN, 'wmt14', MD5_TRAIN)
+    src_dict, trg_dict = __read_to_dict__(tar_file, dict_size)
+    if reverse:
+        src_dict = {v: k for k, v in src_dict.items()}
+        trg_dict = {v: k for k, v in trg_dict.items()}
+    return src_dict, trg_dict
+
+
+def fetch():
+    download(URL_TRAIN, 'wmt14', MD5_TRAIN)
+    download(URL_MODEL, 'wmt14', MD5_MODEL)
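+
+
+# A minimal usage sketch (illustrative only; the 30000-word dictionary size
+# is an arbitrary choice for the example):
+#
+#   import paddle.v2.dataset.wmt14 as wmt14
+#   for src_ids, trg_ids, trg_ids_next in wmt14.train(30000)():
+#       break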
diff --git a/python/paddle/v2/evaluator.py b/python/paddle/v2/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..588eefa3912799aa55f970c6d7e013ed7779ec9a
--- /dev/null
+++ b/python/paddle/v2/evaluator.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.trainer_config_helpers.evaluators as evs
+import inspect
+from config_base import __convert_to_v2__
+
+__all__ = []
+
+
+def initialize():
+    def convert_to_new_name(nm):
+        return nm[:-len("_evaluator")]
+
+    for __ev_name__ in filter(lambda x: x.endswith('_evaluator'), evs.__all__):
+        __ev__ = getattr(evs, __ev_name__)
+        if hasattr(__ev__, 'argspec'):
+            argspec = __ev__.argspec
+        else:
+            argspec = inspect.getargspec(__ev__)
+        parent_names = filter(lambda x: x in ['input', 'label', 'weight'],
+                              argspec.args)
+        v2_ev = __convert_to_v2__(
+            __ev_name__,
+            parent_names=parent_names,
+            is_default_name='name' in argspec.args,
+            attach_parent=True)
+
+        __new_name__ = convert_to_new_name(__ev_name__)
+
+        globals()[__new_name__] = v2_ev
+        globals()[__new_name__].__name__ = __new_name__
+        __all__.append(__new_name__)
+
+
+initialize()
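
Since initialize() strips the `_evaluator` suffix, a v1 name such as
classification_error_evaluator becomes classification_error in this module. A
sketch of attaching one to a network, assuming classification_error_evaluator
is among evs.__all__ and with hypothetical prediction/label layers:

..  code-block:: python

    import paddle.v2 as paddle

    # prediction and label are hypothetical layers defined elsewhere
    ev = paddle.evaluator.classification_error(input=prediction, label=label)
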
diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index a16cfa91f062a60a141ea8fa962b3ecf6f5f0a22..fd6050fa339d280ad54e40128ea6bae25132c873 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -1,26 +1,83 @@
 """
-All training events.
+Testing and training events.
 
 There are:
 
-* BeginTraining
-* EndTraining
+* TestResult
 * BeginIteration
 * EndIteration
 * BeginPass
 * EndPass
-
-TODO(yuyang18): Complete it!
 """
-__all__ = ['EndIteration']
+import py_paddle.swig_paddle as api
+
+__all__ = [
+    'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult'
+]
+
+
+class WithMetric(object):
+    def __init__(self, evaluator):
+        if not isinstance(evaluator, api.Evaluator):
+            raise TypeError("Evaluator should be api.Evaluator type")
+        self.__evaluator__ = evaluator
+
+    @property
+    def metrics(self):
+        names = self.__evaluator__.getNames()
+        retv = dict()
+        for each_name in names:
+            val = self.__evaluator__.getValue(each_name)
+            retv[each_name] = val
+        return retv
+
+
+class TestResult(WithMetric):
+    """
+    Result that trainer.test returns.
+    """
+
+    def __init__(self, evaluator, cost):
+        super(TestResult, self).__init__(evaluator)
+        self.cost = cost
+
+
+class BeginPass(object):
+    """
+    Event fired when one training pass starts.
+    """
+
+    def __init__(self, pass_id):
+        self.pass_id = pass_id
+
+
+class EndPass(WithMetric):
+    """
+    Event fired when one training pass completes.
+    """
+
+    def __init__(self, pass_id, evaluator):
+        self.pass_id = pass_id
+        WithMetric.__init__(self, evaluator)
+
+
+class BeginIteration(object):
+    """
+    Event fired when training on one batch starts.
+    """
+
+    def __init__(self, pass_id, batch_id):
+        self.pass_id = pass_id
+        self.batch_id = batch_id
 
 
-class EndIteration(object):
+class EndIteration(WithMetric):
     """
     Event fired when training on one batch completes.
     """
 
-    def __init__(self, pass_id, batch_id, cost):
+    def __init__(self, pass_id, batch_id, cost, evaluator):
         self.pass_id = pass_id
         self.batch_id = batch_id
         self.cost = cost
+        WithMetric.__init__(self, evaluator)
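
These events are delivered to the event_handler callback that users pass to
the trainer; handlers dispatch on the event type. A sketch of a typical
handler (the surrounding trainer wiring is assumed):

..  code-block:: python

    import paddle.v2 as paddle

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                # EndIteration carries pass_id, batch_id, cost and metrics
                print "pass %d, batch %d, cost %f" % (
                    event.pass_id, event.batch_id, event.cost)
        elif isinstance(event, paddle.event.EndPass):
            # EndPass carries the evaluator metrics of the whole pass
            print "pass %d finished, metrics %s" % (
                event.pass_id, event.metrics)
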
diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..c178336303f53769863063922868cd2a22e4b957
--- /dev/null
+++ b/python/paddle/v2/inference.py
@@ -0,0 +1,106 @@
+import numpy
+import py_paddle.swig_paddle as api
+import collections
+import topology
+import minibatch
+from data_feeder import DataFeeder
+
+__all__ = ['infer']
+
+
+class Inference(object):
+    """
+    Inference combines neural network output and parameters together
+    to do inference.
+
+    :param output_layer: The output layer(s) of the neural network on which
+                         inference should be run.
+    :type output_layer: paddle.v2.config_base.Layer or the sequence
+                        of paddle.v2.config_base.Layer
+    :param parameters: The parameters dictionary.
+    :type parameters: paddle.v2.parameters.Parameters
+    """
+
+    def __init__(self, output_layer, parameters):
+        topo = topology.Topology(output_layer)
+        gm = api.GradientMachine.createFromConfigProto(
+            topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE])
+        for param in gm.getParameters():
+            val = param.getBuf(api.PARAMETER_VALUE)
+            name = param.getName()
+            assert isinstance(val, api.Vector)
+            val.copyFromNumpyArray(parameters.get(name).flatten())
+        self.__gradient_machine__ = gm
+        self.__data_types__ = topo.data_type()
+
+    def iter_infer(self, input, feeding=None):
+        feeder = DataFeeder(self.__data_types__, feeding)
+        batch_size = len(input)
+
+        def __reader_impl__():
+            for each_sample in input:
+                yield each_sample
+
+        reader = minibatch.batch(__reader_impl__, batch_size=batch_size)
+
+        self.__gradient_machine__.start()
+        for data_batch in reader():
+            yield self.__gradient_machine__.forwardTest(feeder(data_batch))
+        self.__gradient_machine__.finish()
+
+    def iter_infer_field(self, field, **kwargs):
+        if not isinstance(field, list) and not isinstance(field, tuple):
+            field = [field]
+
+        for result in self.iter_infer(**kwargs):
+            for each_result in result:
+                item = [each_result[each_field] for each_field in field]
+                yield item
+
+    def infer(self, field='value', **kwargs):
+        retv = None
+        for result in self.iter_infer_field(field=field, **kwargs):
+            if retv is None:
+                retv = [[] for i in xrange(len(result))]
+            for i, item in enumerate(result):
+                retv[i].append(item)
+        retv = [numpy.concatenate(out) for out in retv]
+        if len(retv) == 1:
+            return retv[0]
+        else:
+            return retv
+
+
+def infer(output_layer, parameters, input, feeding=None, field='value'):
+    """
+    Infer a neural network by its output layer(s) and parameters. The user
+    should pass in a batch of input data to be inferred.
+
+    Example usages:
+
+    ..  code-block:: python
+
+        result = paddle.infer(output_layer=prediction,
+                              parameters=parameters,
+                              input=SomeData)
+        print result
+
+    :param output_layer: output layer(s) of the neural network on which
+                         inference is run
+    :type output_layer: paddle.v2.config_base.Layer
+    :param parameters: parameters of the neural network.
+    :type parameters: paddle.v2.parameters.Parameters
+    :param input: input data batch. Should be a python iterable object, and
+                  each element is one data sample.
+    :type input: collections.Iterable
+    :param feeding: A dictionary that maps input names to positions in each
+                    sample. By default it is generated from the input order.
+    :param field: The prediction field. It should be in [`value`, `id`,
+                  `prob`]. `value` and `prob` mean returning the prediction
+                  probabilities; `id` means returning the prediction labels.
+                  Default is `value`. Note that `prob` is only used when
+                  output_layer is beam_search or max_id.
+    :type field: str
+    :return: a numpy array
+    :rtype: numpy.ndarray
+    """
+
+    inferer = Inference(output_layer=output_layer, parameters=parameters)
+    return inferer.infer(field=field, input=input, feeding=feeding)
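
Because Inference.infer returns one concatenated numpy array per requested
field, passing field='id' yields predicted labels instead of probabilities. A
sketch with hypothetical prediction, parameters and test_data:

..  code-block:: python

    # prediction, parameters and test_data are hypothetical
    probs = paddle.infer(
        output_layer=prediction, parameters=parameters, input=test_data)
    labels = paddle.infer(
        output_layer=prediction, parameters=parameters, input=test_data,
        field='id')
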
diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py
index 3920d4a08fc6271816d35bbb234e47ab3b93d3c9..384de9b9d57f88e84ab6067846174bb037502dc0 100644
--- a/python/paddle/v2/layer.py
+++ b/python/paddle/v2/layer.py
@@ -12,246 +12,594 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-Before this new package paddle.v2.layer, users would need to use functions
-in paddle.trainer_config_helpers.layers to configure networks.
-
-The Old Way:
-=========
-This old way requires that the creation of a network be defined in a Python
-function, say network_config, and that this Python function being passed to
-paddle.trainer_config_helpers.parse_network_config for the creation of
-protobuf message description of this network.
-
-```python
-def network_config():
-  img = paddle.trainer_config_helpers.data_layer(name="pixel", size=784)
-  inference = paddle.trainer_config_helpers.fc_layer(
-    input=img,
-    size=10,
-    act=paddle.trainer_config_helpers.SoftmaxActivation())
-  cost = paddle.trainer_config_helpers.classification_cost(
-    input=inference,
-    label=paddle.trainer_config_helpers.data_layer(name="label", size=10))
-
-proto_desc = parse_network_config(network_config)
-```
-
-When parse_network_config executes network_config, those layer definition
-functions like data_layer and fc_layer would change some Python global variables,
-so that after the execution, parse_network_config could collect information from
-these global variables and generates the protobuf message.
-
-
-
-The New Way:
-=========
-In this PR, we define a function in paddle.v2.layer which creates a Python
-class for each layer creation function in paddle.trainer_config_helpers.layers.
-Users can use create a network as follows:
-
-```python
-img = paddle.v2.layer.data(name="pixel", size=784)
-inference = paddle.v2.layer.fc(input=img, size=10, act=paddle.v2.layer.Softmax())
-cost = paddle.v2.layer.classification(
-  input=inference,
-  label=paddle.v2.layer.data(name="label", size=10))
-
-parameters = paddle.v2.parameters.create(cost)
-```
-
-This new way doesn't require those invocations to layer definition functions
-to be in a Python function but could be anywhere.
-
-Also, the creation of a protobuf message is hidden in the invocation of
-paddle.v2.parameters.create, no longer exposed to users.
+`paddle.v2.layer` is a part of the model config packages in paddle.v2. In
+API v2, we want to make Paddle a plain Python package. The model config
+package defines how to configure a neural network topology in Paddle Python
+code.
+
+The primary usage is shown below.
+
+..  code-block:: python
+
+    import paddle.v2 as paddle
+
+    img = paddle.layer.data(name='img', type=paddle.data_type.dense_vector(784))
+    hidden = paddle.layer.fc(input=img, size=200)
+    prediction = paddle.layer.fc(input=hidden, size=10,
+                                 act=paddle.activation.Softmax())
+
+    # use the prediction instance where needed, e.g. to create parameters.
+    parameters = paddle.parameters.create(prediction)
 """
 
 import collections
+import inspect
+import re
 
 import paddle.trainer_config_helpers as conf_helps
+from paddle.trainer.config_parser import \
+    RecurrentLayerGroupWithoutOutLinksBegin, RecurrentLayerGroupSetOutLink, \
+    RecurrentLayerGroupEnd, model_type
 from paddle.trainer_config_helpers.config_parser_utils import \
     parse_network_config as __parse__
+from paddle.trainer_config_helpers.default_decorators import wrap_act_default
+from paddle.trainer_config_helpers.default_decorators import \
+    wrap_bias_attr_default
 from paddle.trainer_config_helpers.default_decorators import wrap_name_default
+from paddle.trainer_config_helpers.layers import RecurrentLayerGroupSetGenerator, Generator
+from paddle.trainer_config_helpers.layers import layer_support
 
-import data_type
 import activation
 import attr
+import data_type
+from config_base import Layer, __convert_to_v2__
 
-__all__ = [
-    'parse_network', 'data', 'fc', 'max_id', 'classification_cost',
-    'cross_entropy_cost', 'cross_entropy_with_selfnorm_cost', 'regression_cost',
-    'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost',
-    'sum_cost', 'huber_cost'
-]
+__all__ = ['parse_network', 'data']
 
 
-def parse_network(*outputs):
+def parse_network(output_layers, extra_layers=None):
     """
-    parse all output layers and then generate a model config proto.
-    :param outputs:
-    :return:
+    Parse all layers in the neural network graph and
+    then generate a ModelConfig object.
+
+    ..  note::
+
+        This function is used internally in paddle.v2 module. User should never
+        invoke this method.
+
+    :param output_layers: Output layers.
+    :type output_layers: Layer
+    :param extra_layers: Layers in the neural network graph that are not on
+                         the path of output_layers but should still be parsed.
+    :type extra_layers: Layer
+    :return: A ModelConfig object instance.
+    :rtype: ModelConfig
     """
+    if not isinstance(output_layers, collections.Sequence):
+        output_layers = [output_layers]
+    if extra_layers is not None and not isinstance(extra_layers,
+                                                   collections.Sequence):
+        extra_layers = [extra_layers]
 
     def __real_func__():
+        """
+        __real_func__ is the function that config_parser.parse invokes. It is
+        a plain old Paddle configuration function.
+        """
         context = dict()
-        real_output = [each.to_proto(context=context) for each in outputs]
+        real_output = [each.to_proto(context=context) for each in output_layers]
+        if extra_layers is not None:
+            extra_output = [
+                each.to_proto(context=context) for each in extra_layers
+            ]
         conf_helps.outputs(real_output)
 
     return __parse__(__real_func__)
 
 
-class Layer(object):
-    def __init__(self, name, parent_layers):
-        assert isinstance(parent_layers, dict)
-        assert isinstance(name, basestring)
-        self.name = name
-        self.__parent_layers__ = parent_layers
+"""
+Some layers need special configuration and cannot be converted with
+__convert_to_v2__, so we implement special LayerV2 classes for them.
+"""
+
+
+class DataLayerV2(Layer):
+    METHOD_NAME = 'data_layer'
+
+    def __init__(self, name, type, **kwargs):
+        assert isinstance(type, data_type.InputType)
+
+        self.type = type
+        self.__method_name__ = 'data_layer'
+        self.__kwargs__ = kwargs
+
+        super(DataLayerV2, self).__init__(name=name, parent_layers=dict())
 
-    def to_proto(self, context):
+    def to_proto_impl(self, **kwargs):
+        args = dict()
+        args['size'] = self.type.dim
+        for each in kwargs:
+            args[each] = kwargs[each]
+        for each in self.__kwargs__:
+            args[each] = self.__kwargs__[each]
+        return getattr(conf_helps, self.__method_name__)(name=self.name, **args)
+
+    def __map_docstr__(doc):
+        doc = re.sub(r'(data = [^\)]+)\).*',
+                     "data = paddle.layer.data(name=\"input\", "
+                     "type=paddle.data_type.dense_vector(1000))", doc)
+
+        doc = re.sub(r':param size:.*',
+                     ':param type: Data type of this data layer', doc)
+        doc = re.sub(r':type size:.*',
+                     ":type size: paddle.v2.data_type.InputType", doc)
+        return doc
+
+
+class MemoryV2(Layer):
+    def __init__(self, name, extra_input=None, **kwargs):
         """
-        function to set proto attribute
+        Init the memory object. If the memory is initialized inside a
+        recurrent_group step function, it may depend on a boot_layer that
+        must be initialized outside the recurrent_group, so we:
+            1. add RecurrentLayerInput to the extra_parent of self.
+            2. add boot_layer to the extra_parent of RecurrentLayerInput.
+
+        :param extra_input: list of RecurrentLayerInput
+        :type extra_input: [RecurrentLayerInput]
         """
-        kwargs = dict()
-        for layer_name in self.__parent_layers__:
-            if not isinstance(self.__parent_layers__[layer_name],
-                              collections.Sequence):
-                v1_layer = self.__parent_layers__[layer_name].to_proto(
-                    context=context)
+        self.name = name
+        super(MemoryV2, self).__init__(name=name, parent_layers=dict())
+        self.__kwargs__ = kwargs
+        self.__boot_layer_name__ = None
+
+        if 'boot_layer' in kwargs:
+            begin_of_current_rnn = []
+            # TODO(yuyang18): Fix inspect, it could be wrong when user invoke a
+            # function inside step.
+            st = inspect.stack()
+            for i in xrange(len(st)):
+                locs = st[i][0].f_locals
+                keys = locs.keys()
+                for key in keys:
+                    val = locs[key]
+                    if isinstance(val, RecurrentLayerInput):
+                        begin_of_current_rnn.append(val)
+                    elif isinstance(val, collections.Sequence):
+                        for v in val:
+                            if isinstance(v, RecurrentLayerInput):
+                                begin_of_current_rnn.append(v)
+
+                if begin_of_current_rnn:
+                    break
+            assert len(begin_of_current_rnn) != 0
+            for extra in begin_of_current_rnn:
+                self.append_extra_parent(extra)
+                extra.append_extra_parent(kwargs['boot_layer'])
+                self.__boot_layer_name__ = kwargs['boot_layer'].name
+
+    def to_proto_impl(self, **kwargs):
+        args = dict()
+        for each in kwargs:
+            args[each] = kwargs[each]
+        for each in self.__kwargs__:
+            args[each] = self.__kwargs__[each]
+
+        if self.__boot_layer_name__ is not None:
+            args['boot_layer'] = self.__context__[self.__boot_layer_name__]
+
+        size = args.get('size', None)
+        if size is not None:
+            if callable(size):
+                real_size = size()
             else:
-                v1_layer = map(lambda x: x.to_proto(context=context),
-                               self.__parent_layers__[layer_name])
-            kwargs[layer_name] = v1_layer
+                real_size = size
+            args['size'] = real_size
+        return conf_helps.memory(name=self.name, **args)
 
-        if self.name not in context:
-            context[self.name] = self.to_proto_impl(**kwargs)
-        return context[self.name]
+    def context_name(self):
+        return self.name + "#memory"
 
-    def to_proto_impl(self, **kwargs):
+    def use_context_name(self):
+        """
+        The memory layer will have the same name as some other layer.
+        :return: True
+        """
+        return True
+
+
+class StaticInputV2(object):
+    def __init__(self, input, is_seq=False, size=None):
+        assert isinstance(input, LayerV2)
+        self.name = input.name
+        self.input = input
+        self.is_seq = is_seq
+        self.size = size
+        # TODO(add size check)
+        # assert input.size is not None or size is not None
+
+
+class BaseGeneratedInputV2(object):
+    def __init__(self):
+        self.bos_id = None
+        self.eos_id = None
+
+    def before_real_step(self):
         raise NotImplementedError()
 
+    def after_real_step(self, *args):
+        raise NotImplementedError()
 
-def __convert_to_v2__(method_name, name_prefix, parent_names):
-    if name_prefix is not None:
-        wrapper = wrap_name_default(name_prefix=name_prefix)
-    else:
-        wrapper = None
 
-    class V2LayerImpl(Layer):
-        def __init__(self, name=None, **kwargs):
-            parent_layers = dict()
-            other_kwargs = dict()
-            for pname in parent_names:
-                if kwargs.has_key(pname):
-                    parent_layers[pname] = kwargs[pname]
+class GeneratedInputV2(BaseGeneratedInputV2):
+    def __init__(self, size, embedding_name, embedding_size):
+        super(GeneratedInputV2, self).__init__()
+        self.size = size
+        self.embedding_name = embedding_name
+        self.embedding_size = embedding_size
 
-            for key in kwargs.keys():
-                if key not in parent_names:
-                    other_kwargs[key] = kwargs[key]
+    def after_real_step(self, input):
+        return max_id(input=input, name='__beam_search_predict__')
 
-            super(V2LayerImpl, self).__init__(name, parent_layers)
-            self.__other_kwargs__ = other_kwargs
+    def before_real_step(self):
+        predict_id = memory(
+            name='__beam_search_predict__',
+            size=self.size,
+            boot_with_const_id=self.bos_id)
 
-        if wrapper is not None:
-            __init__ = wrapper(__init__)
+        trg_emb = embedding(
+            input=predict_id,
+            size=self.embedding_size,
+            param_attr=attr.ParamAttr(name=self.embedding_name))
+        return trg_emb
 
-        def to_proto_impl(self, **kwargs):
-            args = dict()
-            for each in kwargs:
-                args[each] = kwargs[each]
-            for each in self.__other_kwargs__:
-                args[each] = self.__other_kwargs__[each]
-            return getattr(conf_helps, method_name)(name=self.name, **args)
 
-    return V2LayerImpl
+class RecurrentLayerGroupSetGeneratorV2(Layer):
+    def __init__(self, eos_name, max_length, beam_size, num_results_per_sample):
+        self.eos_name = eos_name
+        self.max_length = max_length
+        self.beam_size = beam_size
+        self.num_results_per_sample = num_results_per_sample
+        super(RecurrentLayerGroupSetGeneratorV2, self).__init__(
+            name=eos_name, parent_layers={})
 
+    def to_proto_impl(self, **kwargs):
+        RecurrentLayerGroupSetGenerator(
+            Generator(
+                eos_layer_name=self.eos_name,
+                max_num_frames=self.max_length,
+                beam_size=self.beam_size,
+                num_results_per_sample=self.num_results_per_sample))
+        return self
 
-"""
-Some layer may need some special config, and can not use __convert_to_v2__ to convert.
-So we also need to implement some special LayerV2.
-"""
+    def context_name(self):
+        return self.eos_name + ".fake"
 
+    def use_context_name(self):
+        return True
 
-class DataLayerV2(Layer):
-    def __init__(self, name, type, **kwargs):
-        assert isinstance(type, data_type.InputType)
 
-        self.type = type
-        self.__method_name__ = 'data_layer'
-        self.__kwargs__ = kwargs
+class MixedLayerV2(Layer):
+    """
+    This class is used to support the `with` statement. Without it,
+    mixed_layer could be converted simply by the following code:
 
-        super(DataLayerV2, self).__init__(name=name, parent_layers=dict())
+        mixed = __convert_to_v2__(
+            'mixed_layer', name_prefix='mixed', parent_names=['input'])
+    """
+
+    class AddToSealedMixedLayerExceptionV2(Exception):
+        pass
+
+    def __init__(self,
+                 size=0,
+                 input=None,
+                 name=None,
+                 act=None,
+                 bias_attr=None,
+                 layer_attr=None):
+        self.__method_name__ = 'mixed_layer'
+        self.finalized = False
+        self.__inputs__ = []
+        if input is not None:
+            self.__inputs__ = input
+
+        other_kwargs = dict()
+        other_kwargs['name'] = name
+        other_kwargs['size'] = size
+        other_kwargs['act'] = act
+        other_kwargs['bias_attr'] = bias_attr
+        other_kwargs['layer_attr'] = layer_attr
+        parent_layers = {"input": self.__inputs__}
+        super(MixedLayerV2, self).__init__(name, parent_layers)
+        self.__other_kwargs__ = other_kwargs
+
+    def __iadd__(self, other):
+        if not self.finalized:
+            self.__inputs__.append(other)
+            return self
+        else:
+            raise MixedLayerV2.AddToSealedMixedLayerExceptionV2()
+
+    def __enter__(self):
+        assert len(self.__inputs__) == 0
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.finalized = True
 
     def to_proto_impl(self, **kwargs):
         args = dict()
-        args['size'] = self.type.dim
         for each in kwargs:
             args[each] = kwargs[each]
-        for each in self.__kwargs__:
-            args[each] = self.__kwargs__[each]
-        return getattr(conf_helps, self.__method_name__)(name=self.name, **args)
+        for each in self.__other_kwargs__:
+            args[each] = self.__other_kwargs__[each]
+        size = args.get('size', None)
+        if size is not None:
+            if callable(size):
+                real_size = size()
+            else:
+                real_size = size
+            args['size'] = real_size
+        return getattr(conf_helps, self.__method_name__)(**args)
+
+
+@wrap_name_default("mixed")
+@wrap_act_default(act=activation.Linear())
+@wrap_bias_attr_default(has_bias=False)
+@layer_support(conf_helps.layers.ERROR_CLIPPING, conf_helps.layers.DROPOUT)
+def mixed(size=0,
+          name=None,
+          input=None,
+          act=None,
+          bias_attr=False,
+          layer_attr=None):
+    return MixedLayerV2(size, input, name, act, bias_attr, layer_attr)
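
mixed therefore supports two styles: passing input= directly, or the `with`
form in which projections are accumulated via +=. A sketch, assuming
full_matrix_projection is among the *_projection functions converted at the
bottom of this module and hidden is a hypothetical layer:

..  code-block:: python

    with paddle.layer.mixed(size=128) as m:
        m += paddle.layer.full_matrix_projection(input=hidden)
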
+
+
+class RecurrentLayerInput(Layer):
+    def __init__(self, recurrent_name, index, parent_layers):
+        parents_len = len(parent_layers)
+        assert parents_len <= 1
+        if parents_len == 0:
+            self.__parents__ = []
+        else:
+            self.__parents__ = parent_layers.values()[0]
+        self.__recurrent_name__ = recurrent_name
+        name = self.__parents__[
+            index].name if index >= 0 else self.context_name()
+        super(RecurrentLayerInput, self).__init__(
+            name=name, parent_layers=parent_layers)
+
+    def context_name(self):
+        return self.__recurrent_name__ + ".begin"
+
+    def to_proto_impl(self, **kwargs):
+        model_type('recurrent_nn')
+        RecurrentLayerGroupWithoutOutLinksBegin(
+            name=self.__recurrent_name__,
+            in_links=map(lambda x: x.name, self.__parents__))
+        return self
+
+
+class RecurrentLayerOutput(Layer):
+    def __init__(self, recurrent_name, index, parent_layers):
+        assert len(parent_layers) == 1
+        self.__parents__ = parent_layers.values()[0]
+        super(RecurrentLayerOutput, self).__init__(
+            name=self.__parents__[index].name, parent_layers=parent_layers)
+        self.__recurrent_name__ = recurrent_name
 
+    def context_name(self):
+        return self.__recurrent_name__ + ".end"
 
+    def to_proto_impl(self, **kwargs):
+        for l in self.__parents__:
+            RecurrentLayerGroupSetOutLink(l.name)
+        RecurrentLayerGroupEnd(name=self.__recurrent_name__)
+
+
+LayerV2 = Layer
 data = DataLayerV2
-fc = __convert_to_v2__('fc_layer', name_prefix='fc', parent_names=['input'])
-max_id = __convert_to_v2__(
-    'maxid_layer', name_prefix='maxid', parent_names=['input'])
-classification_cost = __convert_to_v2__(
-    'classification_cost',
-    name_prefix='classification_cost',
-    parent_names=['input', 'label', 'weight'])
-regression_cost = __convert_to_v2__(
-    'regression_cost',
-    name_prefix='regression_cost',
-    parent_names=['input', 'label', 'weight'])
-cross_entropy_cost = __convert_to_v2__(
-    'cross_entropy',
-    name_prefix='cross_entropy',
-    parent_names=['input', 'label'])
-cross_entropy_with_selfnorm_cost = __convert_to_v2__(
-    'cross_entropy_with_selfnorm',
-    name_prefix='cross_entropy_with_selfnorm',
-    parent_names=['input', 'label'])
-multi_binary_label_cross_entropy_cost = __convert_to_v2__(
-    'multi_binary_label_cross_entropy',
-    name_prefix='multi_binary_label_cross_entropy',
-    parent_names=['input', 'label'])
-rank_cost = __convert_to_v2__(
-    'rank_cost',
-    name_prefix='rank_cost',
-    parent_names=['left', 'right', 'label', 'weight'])
-lambda_cost = __convert_to_v2__(
-    'lambda_cost', name_prefix='lambda_cost', parent_names=['input', 'score'])
-sum_cost = __convert_to_v2__(
-    'sum_cost', name_prefix='sum_cost', parent_names=['input'])
-huber_cost = __convert_to_v2__(
-    'huber_cost', name_prefix='huber_cost', parent_names=['input', 'label'])
-
-if __name__ == '__main__':
-    pixel = data(name='pixel', type=data_type.dense_vector(784))
-    label = data(name='label', type=data_type.integer_value(10))
-    weight = data(name='weight', type=data_type.dense_vector(10))
-    score = data(name='score', type=data_type.dense_vector(1))
-
-    hidden = fc(input=pixel,
-                size=100,
-                act=activation.Sigmoid(),
-                param_attr=attr.Param(name='hidden'))
-    inference = fc(input=hidden, size=10, act=activation.Softmax())
-    maxid = max_id(input=inference)
-    cost1 = classification_cost(input=inference, label=label)
-    cost2 = classification_cost(input=inference, label=label, weight=weight)
-    cost3 = cross_entropy_cost(input=inference, label=label)
-    cost4 = cross_entropy_with_selfnorm_cost(input=inference, label=label)
-    cost5 = regression_cost(input=inference, label=label)
-    cost6 = regression_cost(input=inference, label=label, weight=weight)
-    cost7 = multi_binary_label_cross_entropy_cost(input=inference, label=label)
-    cost8 = rank_cost(left=score, right=score, label=score)
-    cost9 = lambda_cost(input=inference, score=score)
-    cost10 = sum_cost(input=inference)
-    cost11 = huber_cost(input=score, label=label)
-
-    print parse_network(cost1, cost2)
-    print parse_network(cost3, cost4)
-    print parse_network(cost5, cost6)
-    print parse_network(cost7, cost8, cost9, cost10, cost11)
-    print parse_network(inference, maxid)
+data.__name__ = 'data'
+AggregateLevel = conf_helps.layers.AggregateLevel
+ExpandLevel = conf_helps.layers.ExpandLevel
+memory = MemoryV2
+
+
+def __layer_name_mapping__(inname):
+    if inname in ['data_layer', 'memory', 'mixed_layer', 'recurrent_group']:
+        # Do not handle these layers; they have dedicated V2 implementations.
+        return
+    elif inname == 'maxid_layer':
+        return 'max_id'
+    elif inname.endswith('memory') or inname.endswith(
+            '_seq') or inname.endswith('_sim') or inname == 'hsigmoid':
+        return inname
+    elif inname in [
+            'cross_entropy', 'multi_binary_label_cross_entropy',
+            'cross_entropy_with_selfnorm'
+    ]:
+        return inname + "_cost"
+    elif inname.endswith('_cost'):
+        return inname
+    elif inname.endswith("_layer"):
+        return inname[:-len("_layer")]
+
+
+def __layer_name_mapping_parent_names__(inname):
+    all_args = getattr(conf_helps, inname).argspec.args
+    return filter(
+        lambda x: x in ['input1', 'input2', 'label', 'input', 'a', 'b',
+                        'expand_as',
+                        'weights', 'vectors', 'weight', 'score', 'left',
+                        'right', 'output_mem'],
+        all_args)
+
+
+def __convert_layer__(_new_name_, _old_name_, _parent_names_):
+    global __all__
+    __all__.append(_new_name_)
+    globals()[_new_name_] = __convert_to_v2__(_old_name_, _parent_names_)
+    globals()[_new_name_].__name__ = _new_name_
+
+
+for each_layer_name in dir(conf_helps):
+    new_name = __layer_name_mapping__(each_layer_name)
+    if new_name is not None:
+        parent_names = __layer_name_mapping_parent_names__(each_layer_name)
+        assert len(parent_names) != 0, each_layer_name
+        __convert_layer__(new_name, each_layer_name, parent_names)
+
+del parent_names
+del new_name
+del each_layer_name
+
+
+@wrap_name_default()
+def recurrent_group(step, input, name=None):
+    if not isinstance(input, collections.Sequence):
+        input = [input]
+
+    non_static_inputs = filter(lambda x: not isinstance(x, StaticInputV2),
+                               input)
+    actual_input = [
+        RecurrentLayerInput(
+            recurrent_name=name,
+            index=i,
+            parent_layers={'recurrent_inputs': non_static_inputs})
+        for i in xrange(len(non_static_inputs))
+    ]
+
+    extra_input = None
+    if len(non_static_inputs) == 0:
+        extra_input = RecurrentLayerInput(
+            recurrent_name=name, index=-1, parent_layers={})
+
+    def __real_step__(*args):
+        rnn_input = list(args)
+        static_inputs = filter(lambda x: isinstance(x, StaticInputV2), input)
+        for static_input in static_inputs:
+            mem_name = "__%s_memory__" % static_input.input.name
+            mem = memory(
+                name=mem_name,
+                extra_input=extra_input,
+                is_seq=static_input.is_seq,
+                size=static_input.input.calculate_size,
+                boot_layer=static_input.input)
+            with mixed(
+                    name=mem_name,
+                    size=static_input.input.calculate_size,
+                    act=activation.Identity()) as mix:
+                mix += identity_projection(input=mem)
+            rnn_input.insert(input.index(static_input), mix)
+        return step(*rnn_input)
+
+    actual_output = __real_step__(*actual_input)
+
+    if not isinstance(actual_output, collections.Sequence):
+        actual_output = [actual_output]
+
+    retv = [
+        RecurrentLayerOutput(
+            recurrent_name=name,
+            index=i,
+            parent_layers={'recurrent_outputs': actual_output})
+        for i in xrange(len(actual_output))
+    ]
+    if len(retv) == 1:
+        return retv[0]
+    else:
+        return retv
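
A step function closes the recurrence by giving a memory the same name as the
layer whose previous-time-step output it should read (see
MemoryV2.use_context_name above). A sketch of a plain RNN step, with a
hypothetical sequence layer emb and size hidden_dim:

..  code-block:: python

    def step(y):
        # the memory shares its name with the fc output below, so it reads
        # that output from the previous time step
        mem = paddle.layer.memory(name="rnn_state", size=hidden_dim)
        return paddle.layer.fc(input=[y, mem],
                               size=hidden_dim,
                               act=paddle.activation.Tanh(),
                               name="rnn_state")

    rnn_out = paddle.layer.recurrent_group(step=step, input=emb)
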
+
+
+@wrap_name_default()
+def beam_search(step,
+                input,
+                bos_id,
+                eos_id,
+                beam_size,
+                max_length=500,
+                name=None,
+                num_results_per_sample=None):
+    if num_results_per_sample is None:
+        num_results_per_sample = beam_size
+    assert num_results_per_sample <= beam_size
+    # logger.warning("num_results_per_sample should be less than beam_size")
+
+    if isinstance(input, StaticInputV2) or isinstance(input,
+                                                      BaseGeneratedInputV2):
+        input = [input]
+
+    generated_input_index = -1
+
+    real_input = []
+    for i, each_input in enumerate(input):
+        assert isinstance(each_input, StaticInputV2) or isinstance(
+            each_input, BaseGeneratedInputV2)
+        if isinstance(each_input, BaseGeneratedInputV2):
+            assert generated_input_index == -1
+            generated_input_index = i
+        else:
+            real_input.append(each_input)
+
+    assert generated_input_index != -1
+
+    gipt = input[generated_input_index]
+    assert isinstance(gipt, BaseGeneratedInputV2)
+
+    gipt.bos_id = bos_id
+    gipt.eos_id = eos_id
+
+    def __real_step__(*args):
+        eos_name = "__%s_eos_layer__" % name
+        generator = RecurrentLayerGroupSetGeneratorV2(
+            eos_name, max_length, beam_size, num_results_per_sample)
+
+        args = list(args)
+        before_step_layer = gipt.before_real_step()
+        before_step_layer.append_child(
+            layer=generator, parent_names=[before_step_layer.name])
+        args.insert(generated_input_index, before_step_layer)
+
+        predict = gipt.after_real_step(step(*args))
+
+        eos_layer = eos(input=predict, eos_id=eos_id, name=eos_name)
+        predict.append_child(layer=eos_layer, parent_names=[predict.name])
+
+        return predict
+
+    # tmp = paddle.layer.recurrent_group(
+    #     step=__real_step__,
+    #     input=real_input,
+    #     reverse=False,
+    #     name=name,
+    #     is_generating=True)
+    tmp = recurrent_group(step=__real_step__, input=real_input, name=name)
+
+    return tmp
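
In use, exactly one BaseGeneratedInputV2 marks where generated tokens are fed
back into the step function, while StaticInputV2 carries the fixed encoder
context. A sketch with hypothetical decoder_step, encoded_vector and sizes:

..  code-block:: python

    gen_inputs = [
        StaticInputV2(input=encoded_vector, is_seq=True),
        GeneratedInputV2(size=target_dict_dim,
                         embedding_name='_target_language_embedding',
                         embedding_size=word_vector_dim)
    ]
    beam_gen = beam_search(step=decoder_step,
                           input=gen_inputs,
                           bos_id=0,
                           eos_id=1,
                           beam_size=5,
                           max_length=100)
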
+
+
+__projection_names__ = filter(lambda x: x.endswith('_projection'),
+                              dir(conf_helps))
+
+__all__ += __projection_names__
+
+__operator_names__ = filter(lambda x: x.endswith('_operator'), dir(conf_helps))
+__all__ += __operator_names__
+
+# convert projection
+for prj in __projection_names__:
+    globals()[prj] = __convert_to_v2__(
+        prj, parent_names=['input'], is_default_name=False)
+    globals()[prj].__name__ = prj
+
+# convert operator
+operator_list = [
+    # [V1_method_name, parent_names],
+    ['dotmul_operator', ['a', 'b']],
+    ['conv_operator', ['img', 'filter']]
+]
+for op in operator_list:
+    globals()[op[0]] = __convert_to_v2__(
+        op[0], parent_names=op[1], is_default_name=False)
+    globals()[op[0]].__name__ = op[0]
diff --git a/python/paddle/v2/minibatch.py b/python/paddle/v2/minibatch.py
new file mode 100644
index 0000000000000000000000000000000000000000..317cf037c69f8639e3760fbfce20565127794fcb
--- /dev/null
+++ b/python/paddle/v2/minibatch.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = ['batch']
+
+
+def batch(reader, batch_size):
+    """
+    Create a batched reader.
+
+    :param reader: the data reader to read from.
+    :type reader: callable
+    :param batch_size: size of each mini-batch
+    :type batch_size: int
+    :return: the batched reader.
+    :rtype: callable
+    """
+
+    def batch_reader():
+        r = reader()
+        b = []
+        for instance in r:
+            b.append(instance)
+            if len(b) == batch_size:
+                yield b
+                b = []
+        if b:
+            yield b
+
+    return batch_reader
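
Note that the final, possibly smaller batch is also yielded. A quick
self-contained check:

..  code-block:: python

    def reader():
        for i in xrange(10):
            yield i

    batched = batch(reader, batch_size=4)
    print list(batched())  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
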
diff --git a/python/paddle/v2/networks.py b/python/paddle/v2/networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e6644196c8242cc3fed7a4fb1503697e5b59ffb
--- /dev/null
+++ b/python/paddle/v2/networks.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.trainer_config_helpers.networks as conf_nw
+import inspect
+from config_base import __convert_to_v2__
+
+__all__ = []
+
+
+def __initialize__():
+    for each_subnetwork in conf_nw.__all__:
+        if each_subnetwork in ['inputs', 'outputs']:
+            continue
+        func = getattr(conf_nw, each_subnetwork)
+        if hasattr(func, 'argspec'):
+            argspec = func.argspec
+        else:
+            argspec = inspect.getargspec(func)
+        if each_subnetwork == 'simple_attention':
+            parents = ['encoded_sequence', 'encoded_proj', 'decoder_state']
+        else:
+            parents = filter(lambda x: x.startswith('input'), argspec.args)
+        assert len(parents) != 0, each_subnetwork
+        v2_subnet = __convert_to_v2__(
+            each_subnetwork,
+            parent_names=parents,
+            is_default_name='name' in argspec.args)
+        globals()[each_subnetwork] = v2_subnet
+        globals()[each_subnetwork].__name__ = each_subnetwork
+        global __all__
+        __all__.append(each_subnetwork)
+
+
+__initialize__()
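
After __initialize__() runs, each v1 sub-network is available in this module
under its original name. A sketch, assuming simple_gru is among
conf_nw.__all__ and emb is a hypothetical layer:

..  code-block:: python

    import paddle.v2 as paddle

    gru = paddle.networks.simple_gru(input=emb, size=128)
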
diff --git a/python/paddle/v2/optimizer.py b/python/paddle/v2/optimizer.py
index aa2942bc9faeb2a353459cd619886f56ea32f450..feefd7d758ba09f5d8f818ca1b12b00c5f0e9797 100644
--- a/python/paddle/v2/optimizer.py
+++ b/python/paddle/v2/optimizer.py
@@ -1,9 +1,17 @@
 import py_paddle.swig_paddle as swig_api
-import paddle.trainer_config_helpers.optimizers as v1_optimizers
+
 import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
-import paddle.v2
+import paddle.trainer_config_helpers.optimizers as v1_optimizers
+"""
+Optimizers (update equations) for the SGD method.
+
+TODO(yuyang18): Complete comments.
+"""
 
-__all__ = ['Adam', 'Adamax']
+__all__ = [
+    'Momentum', 'Adam', 'Adamax', 'AdaGrad', 'DecayedAdaGrad', 'AdaDelta',
+    'RMSProp', 'ModelAverage', 'L2Regularization'
+]
 
 
 class Optimizer(object):
@@ -38,7 +46,64 @@ class Optimizer(object):
                                                              pass_num)
 
 
+class Momentum(Optimizer):
+    """
+    SGD Optimizer.
+
+    SGD is an optimization method that iteratively searches for the network
+    weights minimizing the network's cost/error. In Paddle's implementation,
+    the SGD optimizer is synchronized, which means all gradients are computed
+    and reduced into one gradient before the optimize operation is applied.
+
+    The neural network considers the learning problem of minimizing an
+    objective function that has the form of a sum
+
+    ..  math::
+
+        Q(w) = \\sum_{i}^{n} Q_i(w)
+
+    The value of the function Q is often the cost of the neural network (the
+    mean squared error between prediction and label, for example). Q is
+    parametrized by w, the weights/biases of the neural network, which are
+    what is to be learned; i indexes the i-th observation in the training
+    data.
+
+    SGD then updates the weights by
+
+    ..  math::
+
+        w = w - \\eta \\nabla Q(w) = w - \\eta \\sum_{i}^{n} \\nabla Q_i(w)
+
+    where :math:`\\eta` is the learning rate and :math:`n` is the batch size.
+    """
+
+    def __init__(self, momentum=None, sparse=False, **kwargs):
+        learning_method = v1_optimizers.MomentumOptimizer(
+            momentum=momentum, sparse=sparse)
+        super(Momentum, self).__init__(
+            learning_method=learning_method, **kwargs)
+
+
 class Adam(Optimizer):
+    """
+    Adam optimizer.
+    For details, please refer to `Adam: A Method for Stochastic Optimization
+    <https://arxiv.org/abs/1412.6980>`_.
+
+    ..  math::
+
+        m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\
+        v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\
+        w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}}
+
+    :param beta1: the :math:`\\beta_1` in equation.
+    :type beta1: float
+    :param beta2: the :math:`\\beta_2` in equation.
+    :type beta2: float
+    :param epsilon: the :math:`\\epsilon` in the equation. It is used to
+                    prevent division by zero.
+    :type epsilon: float
+    """
+
     def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs):
         learning_method = v1_optimizers.AdamOptimizer(
             beta1=beta1, beta2=beta2, epsilon=epsilon)
@@ -46,13 +111,133 @@ class Adam(Optimizer):
 
 
 class Adamax(Optimizer):
+    """
+    Adamax optimizer.
+
+    For details, please refer to `Adam: A Method for Stochastic Optimization
+    <https://arxiv.org/abs/1412.6980>`_, where Adamax is described as a
+    variant of Adam.
+
+    ..  math::
+
+        m_t & = \\beta_1 * m_{t-1} + (1-\\beta_1)* \\nabla Q_i(w) \\\\
+        u_t & = max(\\beta_2*u_{t-1}, abs(\\nabla Q_i(w))) \\\\
+        w_t & = w_{t-1} - (\\eta/(1-\\beta_1^t))*m_t/u_t
+
+    :param beta1: the :math:`\\beta_1` in the equation.
+    :type beta1: float
+    :param beta2: the :math:`\\beta_2` in the equation.
+    :type beta2: float
+    """
+
     def __init__(self, beta1=0.9, beta2=0.999, **kwargs):
         learning_method = v1_optimizers.AdamaxOptimizer(
             beta1=beta1, beta2=beta2)
         super(Adamax, self).__init__(learning_method=learning_method, **kwargs)
 
 
+class AdaGrad(Optimizer):
+    """
+    AdaGrad (ADAptive GRAdient algorithm) optimizer.
+
+    For details, please refer to `Adaptive Subgradient Methods for
+    Online Learning and Stochastic Optimization
+    <http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf>`_.
+
+    ..  math::
+
+        G &= \\sum_{\\tau=1}^{t} g_{\\tau} g_{\\tau}^T \\\\
+        w & = w - \\eta diag(G)^{-\\frac{1}{2}} \\circ g
+    """
+
+    def __init__(self, **kwargs):
+        learning_method = v1_optimizers.AdaGradOptimizer()
+        super(AdaGrad, self).__init__(learning_method=learning_method, **kwargs)
+
+
+class DecayedAdaGrad(Optimizer):
+    """
+    AdaGrad method with a decayed sum of gradients. The equations of this
+    method are as follows.
+
+    ..  math::
+
+        E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
+        learning\\_rate &= 1/\\sqrt{E(g_t^2) + \\epsilon}
+
+    :param rho: The :math:`\\rho` parameter in that equation.
+    :type rho: float
+    :param epsilon: The :math:`\\epsilon` parameter in that equation.
+    :type epsilon: float
+    """
+
+    def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
+        learning_method = v1_optimizers.DecayedAdaGradOptimizer(
+            rho=rho, epsilon=epsilon)
+        super(DecayedAdaGrad, self).__init__(
+            learning_method=learning_method, **kwargs)
+
+
+class AdaDelta(Optimizer):
+    """
+    AdaDelta method. For details, please refer to
+    `ADADELTA: An Adaptive Learning Rate Method
+    <https://arxiv.org/abs/1212.5701>`_.
+
+    ..  math::
+
+        E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
+        learning\\_rate &= \\sqrt{(E(dx_{t-1}^2) + \\epsilon) / (E(g_t^2) + \\epsilon)} \\\\
+        E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2
+
+    :param rho: :math:`\\rho` in the equation
+    :type rho: float
+    :param epsilon: :math:`\\epsilon` in the equation
+    :type epsilon: float
+    """
+
+    def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
+        learning_method = v1_optimizers.AdaDeltaOptimizer(
+            rho=rho, epsilon=epsilon)
+        super(AdaDelta, self).__init__(
+            learning_method=learning_method, **kwargs)
+
+
+class RMSProp(Optimizer):
+    """
+    RMSProp (Root Mean Square Propagation) optimizer. For details, please
+    refer to Geoffrey Hinton's lecture slides on RMSProp (Neural Networks
+    for Machine Learning, Lecture 6).
+
+    The equations of this method are as follows:
+
+    ..  math::
+
+        v(w, t) & = \\rho v(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\
+        w & = w - \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w)
+
+    :param rho: the :math:`\\rho` in the equation; the forgetting factor.
+    :type rho: float
+    :param epsilon: the :math:`\\epsilon` in the equation.
+    :type epsilon: float
+    """
+
+    def __init__(self, rho=0.95, epsilon=1e-6, **kwargs):
+        learning_method = v1_optimizers.RMSPropOptimizer(
+            rho=rho, epsilon=epsilon)
+        super(RMSProp, self).__init__(learning_method=learning_method, **kwargs)
+
+
+ModelAverage = v1_optimizers.ModelAverage
+L2Regularization = v1_optimizers.L2Regularization
+
 if __name__ == '__main__':
     swig_api.initPaddle('--use_gpu=false')
-    opt = paddle.v2.optimizer.Adam()
-    print opt.enable_types()
+    for opt in [
+            Momentum(), Adam(), Adamax(), AdaGrad(), DecayedAdaGrad(),
+            AdaDelta(), RMSProp(), Adam(
+                model_average=ModelAverage(average_window=0.5),
+                regularization=L2Regularization(rate=0.5),
+                gradient_clipping_threshold=25)
+    ]:
+        print opt, opt.enable_types()
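
As the __main__ block above suggests, regularization, model averaging and
gradient clipping are passed as keyword arguments to any optimizer. A sketch
of wiring an optimizer into a v2 trainer, with hypothetical cost and
parameters, and assuming paddle.trainer.SGD from the v2 trainer module:

..  code-block:: python

    optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=8e-4))
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer)
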
diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py
index ea504d5104716d157add87ed3f6e31ea69e0a3f0..d686d09f220671fce50be0784e354f97cb109f32 100644
--- a/python/paddle/v2/parameters.py
+++ b/python/paddle/v2/parameters.py
@@ -1,26 +1,24 @@
 import numpy as np
-from . import layer as v2_layer
 import py_paddle.swig_paddle as api
 from paddle.proto.ParameterConfig_pb2 import ParameterConfig
+import struct
+import tarfile
+import cStringIO
+from topology import Topology
 
 __all__ = ['Parameters', 'create']
 
 
-def create(*layers):
+def create(layers):
     """
-    Create parameter pool by layers. In paddle, layer can be represent a
-    model config.
+    Create parameter pool by topology.
 
     :param layers: one or more output layers that define the topology.
     :return: parameter pool.
     :rtype: Parameters
     """
-    for layer in layers:
-        if not isinstance(layer, v2_layer.Layer):
-            raise ValueError(
-                'create must pass a topologies which type is paddle.layer.Layer')
-    model_config = v2_layer.parse_network(*layers)
+    topology = Topology(layers)
     pool = Parameters()
-    for param in model_config.parameters:
+    for param in topology.proto().parameters:
         pool.__append_config__(param)
     return pool
 
@@ -72,6 +70,7 @@ class Parameters(object):
     def keys(self):
         """
         keys are the names of each parameter.
+
         :return: list of parameter name
         :rtype: list
         """
@@ -80,6 +79,7 @@ class Parameters(object):
     def names(self):
         """
         names of each parameter.
+
         :return: list of parameter name
         :rtype: list
         """
@@ -88,6 +88,7 @@ class Parameters(object):
     def has_key(self, key):
         """
         has_key returns True if there is a parameter whose name equals key
+
         :param key: Parameter name
         :type key: basestring
         :return: True if contains such key
@@ -123,6 +124,12 @@ class Parameters(object):
 
         if len(self.__gradient_machines__) == 0:
             # create new parameter in python numpy.
+            if len(self.__tmp_params__) != 0:
+                ret_list = [
+                    mat for name, mat in self.__tmp_params__ if name == key
+                ]
+                if len(ret_list) == 1:
+                    return ret_list[0]
             return np.ndarray(shape=shape, dtype=np.float32)
         else:
             for each_gradient_machine in self.__gradient_machines__:
@@ -141,6 +148,7 @@ class Parameters(object):
     def get_shape(self, key):
         """
         get shape of the parameter.
+
         :param key: parameter name
         :type key: basestring
         :return: parameter's shape
@@ -151,7 +159,8 @@ class Parameters(object):
         if not self.has_key(key):
             raise ValueError("No such parameter %s" % key)
         conf = self.__param_conf__[key]
-        return tuple(map(int, conf.dims))
+        dims = conf.dims if conf.dims else (1, conf.size)
+        return tuple(map(int, dims))
 
     def __setitem__(self, key, value):
         """
@@ -195,6 +204,7 @@ class Parameters(object):
     def set(self, parameter_name, value):
         """
         Set parameter by parameter name & matrix.
+
         :param parameter_name: parameter name
         :type parameter_name: basestring
         :param value: parameter matrix
@@ -224,7 +234,69 @@ class Parameters(object):
                 except ValueError:
                     # If no such parameter in gradient machine, then don't copy
                     pass
-            self.__gradient_machines__.append(gradient_machine)
+
+        self.__gradient_machines__.append(gradient_machine)
+
+    def serialize(self, name, f):
+        """
+
+        :param name:
+        :param f:
+        :type f: file
+        :return:
+        """
+        param = self.get(name)
+        size = reduce(lambda a, b: a * b, param.shape)
+        f.write(struct.pack("IIQ", 0, 4, size))
+        param = param.astype(np.float32)
+        f.write(param.tobytes())
+
+    def deserialize(self, name, f):
+        """
+
+        :param name:
+        :param f:
+        :type f: file
+        :return:
+        """
+        f.read(16)  # header
+        arr = np.frombuffer(f.read(), dtype=np.float32)
+        self.set(name, arr.reshape(self.get_shape(name)))
+
+    def to_tar(self, f):
+        tar = tarfile.TarFile(fileobj=f, mode='w')
+        for nm in self.names():
+            buf = cStringIO.StringIO()
+            self.serialize(nm, buf)
+            tarinfo = tarfile.TarInfo(name=nm)
+            buf.seek(0)
+            tarinfo.size = len(buf.getvalue())
+            tar.addfile(tarinfo, buf)
+
+            conf = self.__param_conf__[nm]
+            confStr = conf.SerializeToString()
+            tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
+            tarinfo.size = len(confStr)
+            buf = cStringIO.StringIO(confStr)
+            buf.seek(0)
+            tar.addfile(tarinfo, fileobj=buf)
+
+    @staticmethod
+    def from_tar(f):
+        params = Parameters()
+        tar = tarfile.TarFile(fileobj=f, mode='r')
+        for finfo in tar:
+            assert isinstance(finfo, tarfile.TarInfo)
+            if finfo.name.endswith('.protobuf'):
+                f = tar.extractfile(finfo)
+                conf = ParameterConfig()
+                conf.ParseFromString(f.read())
+                params.__append_config__(conf)
+
+        for param_name in params.names():
+            f = tar.extractfile(param_name)
+            params.deserialize(param_name, f)
+        return params
 
 
 def __get_parameter_in_gradient_machine__(gradient_machine, name):
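
to_tar and from_tar round-trip both the raw float32 values and each
ParameterConfig protobuf; this is the same format that wmt14.model() loads. A
sketch of saving and restoring an existing Parameters instance:

..  code-block:: python

    with open('params.tar', 'wb') as f:
        parameters.to_tar(f)

    with open('params.tar', 'rb') as f:
        parameters = Parameters.from_tar(f)
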
diff --git a/python/paddle/v2/plot/__init__.py b/python/paddle/v2/plot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..acd3013db4e6a57cd1b269266bea82a31e928397
--- /dev/null
+++ b/python/paddle/v2/plot/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from plot import Ploter
+
+__all__ = ['Ploter']
diff --git a/python/paddle/v2/plot/plot.py b/python/paddle/v2/plot/plot.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f7bd039b07db4832295c2374293bffa588eb4ef
--- /dev/null
+++ b/python/paddle/v2/plot/plot.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+
+class PlotData(object):
+    def __init__(self):
+        self.step = []
+        self.value = []
+
+    def append(self, step, value):
+        self.step.append(step)
+        self.value.append(value)
+
+    def reset(self):
+        self.step = []
+        self.value = []
+
+
+class Ploter(object):
+    def __init__(self, *args):
+        self.__args__ = args
+        self.__plot_data__ = {}
+        for title in args:
+            self.__plot_data__[title] = PlotData()
+        # Demos in notebooks use Ploter to plot figures, but when we convert
+        # the .ipynb to a .py file for testing, importing matplotlib would
+        # make the script crash. So we use `export DISABLE_PLOT=True` to
+        # disable importing these libs.
+        self.__disable_plot__ = os.environ.get("DISABLE_PLOT")
+        if not self.__plot_is_disabled__():
+            import matplotlib.pyplot as plt
+            from IPython import display
+            self.plt = plt
+            self.display = display
+
+    def __plot_is_disabled__(self):
+        return self.__disable_plot__ == "True"
+
+    def append(self, title, step, value):
+        assert isinstance(title, basestring)
+        assert self.__plot_data__.has_key(title)
+        data = self.__plot_data__[title]
+        assert isinstance(data, PlotData)
+        data.append(step, value)
+
+    def plot(self):
+        if self.__plot_is_disabled__():
+            return
+
+        titles = []
+        for title in self.__args__:
+            data = self.__plot_data__[title]
+            assert isinstance(data, PlotData)
+            if len(data.step) > 0:
+                titles.append(title)
+                self.plt.plot(data.step, data.value)
+        self.plt.legend(titles, loc='upper left')
+        self.display.clear_output(wait=True)
+        self.display.display(self.plt.gcf())
+        self.plt.gcf().clear()
+
+    def reset(self):
+        for key in self.__plot_data__:
+            data = self.__plot_data__[key]
+            assert isinstance(data, PlotData)
+            data.reset()
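+
+
+if __name__ == '__main__':
+    # A minimal usage sketch; it assumes matplotlib and IPython are
+    # importable, or that `export DISABLE_PLOT=True` was set to skip plotting.
+    train_title, test_title = 'train cost', 'test cost'
+    ploter = Ploter(train_title, test_title)
+    for step in range(3):
+        ploter.append(train_title, step, 1.0 / (step + 1))
+        ploter.append(test_title, step, 1.5 / (step + 1))
+    ploter.plot()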
diff --git a/python/paddle/v2/plot/tests/CMakeLists.txt b/python/paddle/v2/plot/tests/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..da550a178ce0fe4832b640e0c23505279dedd27a
--- /dev/null
+++ b/python/paddle/v2/plot/tests/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_test(NAME test_ploter
+  COMMAND bash ${PROJ_ROOT}/python/paddle/v2/plot/tests/run_tests.sh
+  ${PYTHON_EXECUTABLE})
diff --git a/python/paddle/v2/plot/tests/__init__.py b/python/paddle/v2/plot/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1abfc08f19505a9010e924e34074e5bc3cc0571
--- /dev/null
+++ b/python/paddle/v2/plot/tests/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import test_ploter
+
+__all__ = ['test_ploter']
diff --git a/python/paddle/v2/plot/tests/run_tests.sh b/python/paddle/v2/plot/tests/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9c1a4a71ce43f285c4f970eddf6af46a2821a40a
--- /dev/null
+++ b/python/paddle/v2/plot/tests/run_tests.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+pushd `dirname $0` > /dev/null
+SCRIPTPATH=$PWD
+popd > /dev/null
+
+cd $SCRIPTPATH
+$1 -m pip install ../../../../../paddle/dist/*.whl
+
+export DISABLE_PLOT="True"
+test_list="test_ploter.py"
+
+export PYTHONPATH=$PWD/../../../../../python/
+
+for fn in $test_list
+do
+  echo "test $fn"
+  $1 $fn
+  if [ $? -ne 0 ]; then
+    exit 1
+  fi
+done
diff --git a/python/paddle/v2/plot/tests/test_ploter.py b/python/paddle/v2/plot/tests/test_ploter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a75f853ed933dfce651faf758f71feca7cd8d328
--- /dev/null
+++ b/python/paddle/v2/plot/tests/test_ploter.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from paddle.v2.plot import Ploter
+
+
+class TestCommon(unittest.TestCase):
+    def test_append(self):
+        title1 = "title1"
+        title2 = "title2"
+        plot_test = Ploter(title1, title2)
+        plot_test.append(title1, 1, 2)
+        plot_test.append(title1, 2, 5)
+        plot_test.append(title2, 3, 4)
+        self.assertEqual(plot_test.__plot_data__[title1].step, [1, 2])
+        self.assertEqual(plot_test.__plot_data__[title1].value, [2, 5])
+        self.assertEqual(plot_test.__plot_data__[title2].step, [3])
+        self.assertEqual(plot_test.__plot_data__[title2].value, [4])
+        plot_test.reset()
+        self.assertEqual(plot_test.__plot_data__[title1].step, [])
+        self.assertEqual(plot_test.__plot_data__[title1].value, [])
+        self.assertEqual(plot_test.__plot_data__[title2].step, [])
+        self.assertEqual(plot_test.__plot_data__[title2].value, [])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/pooling.py b/python/paddle/v2/pooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..4881c27d1d6d3d926f12aab096f377164debf1ef
--- /dev/null
+++ b/python/paddle/v2/pooling.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.trainer_config_helpers.poolings
+import copy
+
+__all__ = []
+suffix = 'Pooling'
+
+for name in paddle.trainer_config_helpers.poolings.__all__:
+    new_name = name[:-len(suffix)]
+    globals()[new_name] = copy.copy(
+        getattr(paddle.trainer_config_helpers.poolings, name))
+    globals()[new_name].__name__ = new_name
+    __all__.append(new_name)
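+
+# A sketch of the resulting API: for example, poolings.MaxPooling is
+# re-exported here as Max and poolings.AvgPooling as Avg, so callers write:
+#
+#     import paddle.v2.pooling as pooling
+#     pool_type = pooling.Max()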
diff --git a/python/paddle/v2/reader/__init__.py b/python/paddle/v2/reader/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b059735a924d58714cd88a761eb83143f1192d6
--- /dev/null
+++ b/python/paddle/v2/reader/__init__.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+At training and testing time, PaddlePaddle programs need to read data. To ease
+users' work of writing data reading code, we define the following:
+
+- A *reader* is a function that reads data (from file, network, random number
+  generator, etc) and yields data items.
+- A *reader creator* is a function that returns a reader function.
+- A *reader decorator* is a function that accepts one or more readers and
+  returns a reader.
+- A *batch reader* is a function that reads data (from *reader*, file, network,
+  random number generator, etc) and yields a batch of data items.
+
+#####################
+Data Reader Interface
+#####################
+
+Indeed, a *data reader* doesn't have to be a function that reads and yields
+data items. It can be any function with no parameters that creates an iterable
+(anything that can be used in :code:`for x in iterable`)\:
+
+..  code-block:: python
+
+    iterable = data_reader()
+
+An element produced from the iterable should be a **single** entry of data,
+**not** a mini batch. That entry of data could be a single item or a tuple of
+items. Each item should be of a `supported type `_ (e.g., a numpy 1d array of
+float32, an int, or a list of ints).
+
+An example implementation of a single-item data reader creator:
+
+..  code-block:: python
+
+    def reader_creator_random_image(width, height):
+        def reader():
+            while True:
+                yield numpy.random.uniform(-1, 1, size=width*height)
+        return reader
+
+An example implementation of a multiple-item data reader creator:
+
+..  code-block:: python
+
+    def reader_creator_random_image_and_label(width, height, label):
+        def reader():
+            while True:
+                yield numpy.random.uniform(-1, 1, size=width*height), label
+        return reader
+
+
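+A *batch reader* can be built from any reader by grouping consecutive items;
+a minimal sketch (the helper name ``batched`` is illustrative, not part of
+this package):
+
+..  code-block:: python
+
+    def batched(reader, batch_size):
+        def batch_reader():
+            batch = []
+            for item in reader():
+                batch.append(item)
+                if len(batch) == batch_size:
+                    yield batch
+                    batch = []
+            if batch:
+                yield batch
+        return batch_reader
+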
+TODO(yuyang18): Should we add the whole design doc here?
+"""
+
+import decorator
+from decorator import *
+
+import creator
+
+__all__ = decorator.__all__ + ['creator']
diff --git a/python/paddle/v2/reader/creator.py b/python/paddle/v2/reader/creator.py
new file mode 100644
index 0000000000000000000000000000000000000000..07142056f872db5113acdd296b17c52b343c1be6
--- /dev/null
+++ b/python/paddle/v2/reader/creator.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+The creator package contains some simple reader creators, which can be used in
+user programs.
+"""
+
+__all__ = ['np_array', 'text_file']
+
+
+def np_array(x):
+    """
+    Creates a reader that yields elements of x if it is a
+    numpy vector, rows of x if it is a numpy matrix, or, in
+    general, the sub-arrays indexed by the first dimension.
+
+    :param x: the numpy array to create the reader from.
+    :returns: data reader created from x.
+    """
+
+    def reader():
+        if x.ndim < 1:
+            # A 0-d array is not iterable; yield it as a whole and stop.
+            yield x
+            return
+
+        for e in x:
+            yield e
+
+    return reader
+
+
+def text_file(path):
+    """
+    Creates a data reader that outputs text line by line from the given text
+    file. The trailing newline ('\\\\n') of each line will be removed.
+
+    :param path: path of the text file.
+    :returns: data reader of the text file.
+    """
+
+    def reader():
+        with open(path, "r") as f:
+            for l in f:
+                yield l.rstrip('\n')
+
+    return reader
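+
+
+if __name__ == '__main__':
+    # A minimal usage sketch of np_array; the array below is illustrative.
+    # text_file works the same way: text_file('/path/to/file') returns a
+    # reader yielding the file's lines with trailing newlines stripped.
+    import numpy
+    demo_reader = np_array(numpy.array([[1, 2], [3, 4]]))
+    for row in demo_reader():
+        print row  # prints [1 2], then [3 4]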
diff --git a/python/paddle/reader/decorator.py b/python/paddle/v2/reader/decorator.py
similarity index 77%
rename from python/paddle/reader/decorator.py
rename to python/paddle/v2/reader/decorator.py
index 9f4234358f469b9741dfef96aab3853d5972b5f4..104ce9a0411413bb8fc65eedf5821f98d6acdba3 100644
--- a/python/paddle/reader/decorator.py
+++ b/python/paddle/v2/reader/decorator.py
@@ -14,13 +14,13 @@
 
 __all__ = [
     'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
-    'ComposeNotAligned'
+    'ComposeNotAligned', 'firstn'
 ]
 
-from Queue import Queue
-from threading import Thread
 import itertools
 import random
+from Queue import Queue
+from threading import Thread
 
 
 def map_readers(func, *readers):
@@ -28,9 +28,11 @@ def map_readers(func, *readers):
     Creates a data reader that outputs return value of function using
     output of each data readers as arguments.
 
-    :param func: function to use.
-    :param *readers: readers whose outputs will be used as arguments of func.
-    :returns: the created data reader.
+    :param func: function to use. The type of func should be (Sample) => Sample.
+    :type func: callable
+    :param readers: readers whose outputs will be used as arguments of func.
+    :return: the created data reader.
+    :rtype: callable
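+
+    A minimal usage sketch, where ``reader_a`` and ``reader_b`` stand for any
+    existing readers whose outputs are passed to ``func`` together:
+
+    ..  code-block:: python
+
+        summed = map_readers(lambda a, b: a + b, reader_a, reader_b)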
     """
 
     def reader():
@@ -45,16 +47,19 @@ def map_readers(func, *readers):
 
 def shuffle(reader, buf_size):
     """
-    Creates a data reader whose data output is suffled.
+    Creates a data reader whose data output is shuffled.
 
     Output from the iterator that created by original reader will be
     buffered into shuffle buffer, and then shuffled. The size of shuffle buffer
     is determined by argument buf_size.
 
     :param reader: the original reader whose output will be shuffled.
+    :type reader: callable
     :param buf_size: shuffle buffer size.
+    :type buf_size: int
 
-    :returns:the new reader whose output is shuffled.
+    :return: the new reader whose output is shuffled.
+    :rtype: callable
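+
+    A minimal usage sketch, where ``some_reader`` stands for any existing
+    reader; a larger ``buf_size`` gives a more thorough shuffle at the cost
+    of more memory:
+
+    ..  code-block:: python
+
+        shuffled = shuffle(some_reader, buf_size=512)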
     """
 
     def data_reader():
@@ -88,7 +93,8 @@ def chain(*readers):
     [0, 0, 0, 1, 1, 1, 2, 2, 2]
 
     :param readers: input readers.
-    :returns: the new data reader.
+    :return: the new data reader.
+    :rtype: callable
     """
 
     def reader():
@@ -115,12 +121,13 @@ def compose(*readers, **kwargs):
     The composed reader will output:
     (1, 2, 3, 4, 5)
 
-    :*readers: readers that will be composed together.
-    :check_alignment: if True, will check if input readers are aligned
+    :param readers: readers that will be composed together.
+    :param check_alignment: if True, will check if input readers are aligned
         correctly. If False, will not check alignment and trailing outputs
         will be discarded. Defaults to True.
+    :type check_alignment: bool
 
-    :returns: the new data reader.
+    :return: the new data reader.
 
     :raises ComposeNotAligned: outputs of readers are not aligned.
         Will not raise when check_alignment is set to False.
@@ -161,7 +168,9 @@ def buffered(reader, size):
     as the buffer is not empty.
     
     :param reader: the data reader to read from.
+    :type reader: callable
     :param size: max buffer size.
+    :type size: int
     
     :returns: the buffered data reader.
     """
@@ -191,3 +200,27 @@ def buffered(reader, size):
             e = q.get()
 
     return data_reader
+
+
+def firstn(reader, n):
+    """
+    Limit the max number of samples that the reader can return.
+
+    :param reader: the data reader to read from.
+    :type reader: callable
+    :param n: the max number of samples the reader returns.
+    :type n: int
+    :return: the decorated reader.
+    :rtype: callable
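+
+    A minimal usage sketch, where ``some_reader`` stands for any existing
+    reader:
+
+    ..  code-block:: python
+
+        limited = firstn(some_reader, 100)  # yields at most 100 samples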
+    """
+
+    # TODO(yuyang18): Check whether simply dropping the reader cleans up the
+    # opened resources or not.
+
+    def firstn_reader():
+        for i, item in enumerate(reader()):
+            if i == n:
+                break
+            yield item
+
+    return firstn_reader
diff --git a/python/paddle/v2/reader/tests/CMakeLists.txt b/python/paddle/v2/reader/tests/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a51f700406b48f8186e45f1ced94765e343a8b5e
--- /dev/null
+++ b/python/paddle/v2/reader/tests/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_test(NAME reader_tests
+  COMMAND bash ${PROJ_ROOT}/python/paddle/v2/reader/tests/run_tests.sh
+  ${PYTHON_EXECUTABLE})
diff --git a/python/paddle/v2/reader/tests/__init__.py b/python/paddle/v2/reader/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python/paddle/v2/reader/tests/creator_test.py b/python/paddle/v2/reader/tests/creator_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f8d7133b8694aae5541eff9576eaba8a31e77dc
--- /dev/null
+++ b/python/paddle/v2/reader/tests/creator_test.py
@@ -0,0 +1,40 @@
+# Copyright PaddlePaddle contributors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import unittest
+
+import numpy as np
+
+import paddle.v2.reader.creator
+
+
+class TestNumpyArray(unittest.TestCase):
+    def test_numpy_array(self):
+        l = [[1, 2, 3], [4, 5, 6]]
+        x = np.array(l, np.int32)
+        reader = paddle.v2.reader.creator.np_array(x)
+        for idx, e in enumerate(reader()):
+            self.assertItemsEqual(e, l[idx])
+
+
+class TestTextFile(unittest.TestCase):
+    def test_text_file(self):
+        path = os.path.join(os.path.dirname(__file__), "test_data_creator.txt")
+        reader = paddle.v2.reader.creator.text_file(path)
+        for idx, e in enumerate(reader()):
+            self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/v2/reader/tests/decorator_test.py
similarity index 81%
rename from python/paddle/reader/tests/decorator_test.py
rename to python/paddle/v2/reader/tests/decorator_test.py
index 0396a61786539bf57be4cab9ebbd108ade9e7c83..734154b9790a4dc118d11992343648364c907305 100644
--- a/python/paddle/reader/tests/decorator_test.py
+++ b/python/paddle/v2/reader/tests/decorator_test.py
@@ -11,9 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import paddle.reader
 import time
+import unittest
+
+import paddle.v2.reader
 
 
 def reader_creator_10(dur):
@@ -37,7 +38,7 @@ class TestMap(unittest.TestCase):
             yield "h"
             yield "i"
 
-        r = paddle.reader.map_readers(tokenize, read)
+        r = paddle.v2.reader.map_readers(tokenize, read)
         for i, e in enumerate(r()):
             self.assertEqual(e, i)
 
@@ -45,7 +46,7 @@ class TestMap(unittest.TestCase):
 class TestBuffered(unittest.TestCase):
     def test_read(self):
         for size in range(20):
-            b = paddle.reader.buffered(reader_creator_10(0), size)
+            b = paddle.v2.reader.buffered(reader_creator_10(0), size)
             c = 0
             for i in b():
                 self.assertEqual(i, c)
@@ -54,7 +55,7 @@ class TestBuffered(unittest.TestCase):
 
     def test_buffering(self):
         # read have 30ms delay.
-        b = paddle.reader.buffered(reader_creator_10(0.03), 10)
+        b = paddle.v2.reader.buffered(reader_creator_10(0.03), 10)
         last_time = time.time()
         for idx, i in enumerate(b()):
             elapsed_time = time.time() - last_time
@@ -68,17 +69,17 @@ class TestBuffered(unittest.TestCase):
 
 class TestCompose(unittest.TestCase):
     def test_compse(self):
-        reader = paddle.reader.compose(
+        reader = paddle.v2.reader.compose(
             reader_creator_10(0), reader_creator_10(0))
         for idx, e in enumerate(reader()):
             self.assertEqual(e, (idx, idx))
 
     def test_compose_not_aligned(self):
         total = 0
-        reader = paddle.reader.compose(
-            paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)),
+        reader = paddle.v2.reader.compose(
+            paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0)),
             reader_creator_10(0))
-        with self.assertRaises(paddle.reader.ComposeNotAligned):
+        with self.assertRaises(paddle.v2.reader.ComposeNotAligned):
             for e in reader():
                 total += 1
         # expecting 10, not 20
@@ -86,8 +87,8 @@ class TestCompose(unittest.TestCase):
 
     def test_compose_not_aligned_no_check(self):
         total = 0
-        reader = paddle.reader.compose(
-            paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)),
+        reader = paddle.v2.reader.compose(
+            paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0)),
             reader_creator_10(0),
             check_alignment=False)
         for e in reader():
@@ -98,7 +99,7 @@ class TestCompose(unittest.TestCase):
 
 class TestChain(unittest.TestCase):
     def test_chain(self):
-        c = paddle.reader.chain(reader_creator_10(0), reader_creator_10(0))
+        c = paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0))
         idx = 0
         for e in c():
             self.assertEqual(e, idx % 10)
@@ -111,7 +112,7 @@ class TestShuffle(unittest.TestCase):
         case = [(0, True), (1, True), (10, False), (100, False)]
         a = reader_creator_10(0)
         for size, checkEq in case:
-            s = paddle.reader.shuffle(a, size)
+            s = paddle.v2.reader.shuffle(a, size)
             total = 0
             for idx, e in enumerate(s()):
                 if checkEq:
diff --git a/python/paddle/v2/reader/tests/run_tests.sh b/python/paddle/v2/reader/tests/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a544a5636021bcf8bd9a35966c91ae343c149d14
--- /dev/null
+++ b/python/paddle/v2/reader/tests/run_tests.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+pushd `dirname $0` > /dev/null
+SCRIPTPATH=$PWD
+popd > /dev/null
+
+cd $SCRIPTPATH
+$1 -m pip install ../../../../../paddle/dist/*.whl
+
+test_list="creator_test.py decorator_test.py"
+
+export PYTHONPATH=$PWD/../../../../../python/
+
+for fn in $test_list
+do
+  echo "test $fn"
+  $1 $fn
+  if [ $? -ne 0 ]; then
+    exit 1
+  fi
+done
diff --git a/python/paddle/v2/reader/tests/test_data_creator.txt b/python/paddle/v2/reader/tests/test_data_creator.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a2a8d47d43868d369083808497697da79e620e31
--- /dev/null
+++ b/python/paddle/v2/reader/tests/test_data_creator.txt
@@ -0,0 +1,3 @@
+0 1
+2 3
+4 5
diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt
index 402ad2e664c773dc7a34c46f57ffe70d6039a09a..572deaff356712cac23cd7911cdf289db100564c 100644
--- a/python/paddle/v2/tests/CMakeLists.txt
+++ b/python/paddle/v2/tests/CMakeLists.txt
@@ -1,4 +1,16 @@
+add_test(NAME test_v2_api
+        COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE})
+
 add_test(NAME test_v2_layer
         COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
         ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py
         WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
+
+add_test(NAME test_v2_rnn_layer
+        COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
+        ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py)
+
+add_test(NAME test_topology
+        COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
+        ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_topology.py
+        WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
diff --git a/python/paddle/v2/tests/run_tests.sh b/python/paddle/v2/tests/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dda1b1bd222a9f226db1a4bd730e9637ab882196
--- /dev/null
+++ b/python/paddle/v2/tests/run_tests.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+pushd `dirname $0` > /dev/null
+SCRIPTPATH=$PWD
+popd > /dev/null
+
+cd $SCRIPTPATH
+
+$1 -m pip install ../../../../paddle/dist/*.whl
+
+test_list="test_data_feeder.py test_parameters.py"
+
+export PYTHONPATH=$PWD/../../../../python/
+
+for fn in $test_list
+do
+  echo "test $fn"
+  $1 $fn
+  if [ $? -ne 0 ]; then
+    exit 1
+  fi
+done
diff --git a/python/paddle/v2/tests/test_data_feeder.py b/python/paddle/v2/tests/test_data_feeder.py
new file mode 100644
index 0000000000000000000000000000000000000000..71eb3bf31425c22b47accc11c9550042e077ef12
--- /dev/null
+++ b/python/paddle/v2/tests/test_data_feeder.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import py_paddle.swig_paddle as api
+import numpy as np
+
+from paddle.v2 import data_type
+from paddle.v2.data_feeder import DataFeeder
+
+
+class DataFeederTest(unittest.TestCase):
+    def dense_reader(self, size):
+        data = np.random.random(size)
+        return data
+
+    def sparse_binary_reader(self, high, size_limit, non_empty=False):
+        num = np.random.randint(size_limit)  # num could be 0
+        while non_empty and num == 0:
+            num = np.random.randint(size_limit)
+        return np.random.randint(high, size=num).tolist()
+
+    def test_dense(self):
+        def compare(input):
+            feeder = DataFeeder([('image', data_type.dense_vector(784))],
+                                {'image': 0})
+            arg = feeder(input)
+            output = arg.getSlotValue(0).copyToNumpyMat()
+            input = np.array(input, dtype='float32')
+            self.assertAlmostEqual(input.all(), output.all())
+
+        # test numpy array
+        batch_size = 32
+        dim = 784
+        data = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(self.dense_reader(dim))
+            data.append(each_sample)
+        compare(data)
+
+        # each feature is a list
+        data = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(self.dense_reader(dim).tolist())
+            data.append(each_sample)
+        compare(data)
+
+        # test tuple
+        data = []
+        for i in xrange(batch_size):
+            each_sample = (self.dense_reader(dim).tolist(), )
+            data.append(each_sample)
+        compare(data)
+
+    def test_sparse_binary(self):
+        dim = 10000
+        batch_size = 32
+        data = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(self.sparse_binary_reader(dim, 50))
+            data.append(each_sample)
+        feeder = DataFeeder([('input', data_type.sparse_binary_vector(dim))],
+                            {'input': 0})
+        arg = feeder(data)
+        output = arg.getSlotValue(0)
+        assert isinstance(output, api.Matrix)
+        for i in xrange(batch_size):
+            self.assertEqual(output.getSparseRowCols(i), data[i][0])
+
+    def test_sparse(self):
+        dim = 10000
+        batch_size = 32
+        v = []
+        w = []
+        data = []
+        for dat in xrange(batch_size):
+            each_sample = []
+            a = self.sparse_binary_reader(dim, 40, non_empty=True)
+            b = self.dense_reader(len(a)).tolist()
+            v.append(a)
+            w.append(np.array(b, dtype="float32"))
+            each_sample.append(zip(a, b))
+            data.append(each_sample)
+
+        feeder = DataFeeder([('input', data_type.sparse_vector(dim))],
+                            {'input': 0})
+        arg = feeder(data)
+        output = arg.getSlotValue(0)
+        assert isinstance(output, api.Matrix)
+        for i in xrange(batch_size):
+            self.assertEqual(output.getSparseRowCols(i), v[i])
+            cols_value = output.getSparseRowColsVal(i)
+            value = [val[1] for val in cols_value]
+            value = np.array(value, dtype="float32")
+            self.assertAlmostEqual(value.all(), w[i].all())
+
+    def test_integer(self):
+        value_range = 100
+        batch_size = 32
+        index = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(np.random.randint(value_range))
+            index.append(each_sample)
+        feeder = DataFeeder([('input', data_type.integer_value(value_range))],
+                            {'input': 0})
+        arg = feeder(index)
+        output = arg.getSlotIds(0).copyToNumpyArray()
+        index = np.array(index, dtype='int')
+        self.assertEqual(output.all(), index.flatten().all())
+
+    def test_integer_sequence(self):
+        value_range = 10000
+        batch_size = 32
+        start = [0]
+        data = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(
+                self.sparse_binary_reader(
+                    value_range, 30, non_empty=True))
+            data.append(each_sample)
+            start.append(len(each_sample[0]) + start[-1])
+        feeder = DataFeeder(
+            [('input', data_type.integer_value_sequence(value_range))],
+            {'input': 0})
+        arg = feeder(data)
+        output_data = arg.getSlotIds(0).copyToNumpyArray()
+        output_start = arg.getSlotSequenceStartPositions(0).copyToNumpyArray()
+
+        index = []
+        for dat in data:
+            index.extend(x for x in dat[0])  # only one feature, so dat[0]
+        index = np.array(index, dtype='int')
+        start = np.array(start, dtype='int')
+        self.assertEqual(output_data.all(), index.all())
+        self.assertEqual(output_start.all(), start.all())
+
+    def test_multiple_features(self):
+        batch_size = 2
+        data = []
+        for i in xrange(batch_size):
+            each_sample = []
+            each_sample.append(np.random.randint(10))
+            each_sample.append(
+                self.sparse_binary_reader(
+                    20000, 40, non_empty=True))
+            each_sample.append(self.dense_reader(100))
+            data.append(each_sample)
+
+        # test multiple features
+        data_types = [('fea0', data_type.dense_vector(100)),
+                      ('fea1', data_type.sparse_binary_vector(20000)),
+                      ('fea2', data_type.integer_value(10))]
+        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
+        arg = feeder(data)
+        output_dense = arg.getSlotValue(0).copyToNumpyMat()
+        output_sparse = arg.getSlotValue(1)
+        output_index = arg.getSlotIds(2).copyToNumpyArray()
+        for i in xrange(batch_size):
+            self.assertEqual(output_dense[i].all(), data[i][2].all())
+            self.assertEqual(output_sparse.getSparseRowCols(i), data[i][1])
+            self.assertEqual(output_index[i], data[i][0])
+
+        # reader returns 3 features, but only 2 of them are used
+        data_types = [('fea0', data_type.dense_vector(100)),
+                      ('fea2', data_type.integer_value(10))]
+        feeder = DataFeeder(data_types, {'fea0': 2, 'fea2': 0})
+        arg = feeder(data)
+        output_dense = arg.getSlotValue(0).copyToNumpyMat()
+        output_index = arg.getSlotIds(1).copyToNumpyArray()
+        for i in xrange(batch_size):
+            self.assertEqual(output_dense[i].all(), data[i][2].all())
+            self.assertEqual(output_index[i], data[i][0])
+
+        # reader returns 3 features, one of which is duplicated
+        data_types = [('fea0', data_type.dense_vector(100)),
+                      ('fea1', data_type.sparse_binary_vector(20000)),
+                      ('fea2', data_type.integer_value(10)),
+                      ('fea3', data_type.dense_vector(100))]
+        feeder = DataFeeder(data_types,
+                            {'fea0': 2,
+                             'fea1': 1,
+                             'fea2': 0,
+                             'fea3': 2})
+        arg = feeder(data)
+        fea0 = arg.getSlotValue(0).copyToNumpyMat()
+        fea1 = arg.getSlotValue(1)
+        fea2 = arg.getSlotIds(2).copyToNumpyArray()
+        fea3 = arg.getSlotValue(3).copyToNumpyMat()
+        for i in xrange(batch_size):
+            self.assertEqual(fea0[i].all(), data[i][2].all())
+            self.assertEqual(fea1.getSparseRowCols(i), data[i][1])
+            self.assertEqual(fea2[i], data[i][0])
+            self.assertEqual(fea3[i].all(), data[i][2].all())
+
+    def test_multiple_features_tuple(self):
+        batch_size = 2
+        data = []
+        for i in xrange(batch_size):
+            a = np.random.randint(10)
+            b = self.sparse_binary_reader(20000, 40, non_empty=True)
+            c = self.dense_reader(100)
+            each_sample = (a, b, c)
+            data.append(each_sample)
+
+        # test multiple features
+        data_types = [('fea0', data_type.dense_vector(100)),
+                      ('fea1', data_type.sparse_binary_vector(20000)),
+                      ('fea2', data_type.integer_value(10))]
+        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
+        arg = feeder(data)
+        out_dense = arg.getSlotValue(0).copyToNumpyMat()
+        out_sparse = arg.getSlotValue(1)
+        out_index = arg.getSlotIds(2).copyToNumpyArray()
+        for i in xrange(batch_size):
+            self.assertEqual(out_dense[i].all(), data[i][2].all())
+            self.assertEqual(out_sparse.getSparseRowCols(i), data[i][1])
+            self.assertEqual(out_index[i], data[i][0])
+
+
+if __name__ == '__main__':
+    api.initPaddle("--use_gpu=0")
+    suite = unittest.TestLoader().loadTestsFromTestCase(DataFeederTest)
+    unittest.TextTestRunner().run(suite)
+    if api.isGpuVersion():
+        api.setUseGpu(True)
+        unittest.main()
diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py
index b600e8cf765122ab6cfe8530465391c92be0590f..c67f3b84d96eb92d94ad80cc54c5e056103c1a1a 100644
--- a/python/paddle/v2/tests/test_layer.py
+++ b/python/paddle/v2/tests/test_layer.py
@@ -11,26 +11,119 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import difflib
 import unittest
 
-import paddle.trainer_config_helpers as conf_helps
 import paddle.v2.activation as activation
 import paddle.v2.attr as attr
 import paddle.v2.data_type as data_type
 import paddle.v2.layer as layer
-from paddle.trainer_config_helpers.config_parser_utils import \
-    parse_network_config as parse_network
+import paddle.v2.pooling as pooling
+import paddle.v2.networks as networks
+import paddle.v2.evaluator as evaluator
 
-pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
+pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
 label = layer.data(name='label', type=data_type.integer_value(10))
-weight = layer.data(name='weight', type=data_type.dense_vector(10))
+weight = layer.data(name='weight', type=data_type.dense_vector(1))
+combine_weight = layer.data(
+    name='weight_combine', type=data_type.dense_vector(10))
 score = layer.data(name='score', type=data_type.dense_vector(1))
+
 hidden = layer.fc(input=pixel,
                   size=100,
                   act=activation.Sigmoid(),
                   param_attr=attr.Param(name='hidden'))
 inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
+conv = layer.img_conv(
+    input=pixel,
+    filter_size=1,
+    filter_size_y=1,
+    num_channels=8,
+    num_filters=16,
+    act=activation.Linear())
+
+
+class ImageLayerTest(unittest.TestCase):
+    def test_conv_layer(self):
+        conv_shift = layer.conv_shift(a=pixel, b=score)
+        print layer.parse_network(conv, conv_shift)
+
+    def test_pooling_layer(self):
+        maxpool = layer.img_pool(
+            input=conv,
+            pool_size=2,
+            num_channels=16,
+            padding=1,
+            pool_type=pooling.Max())
+        spp = layer.spp(input=conv,
+                        pyramid_height=2,
+                        num_channels=16,
+                        pool_type=pooling.Max())
+        maxout = layer.maxout(input=conv, num_channels=16, groups=4)
+        print layer.parse_network([maxpool, spp, maxout])
+
+    def test_norm_layer(self):
+        norm1 = layer.img_cmrnorm(input=conv, size=5)
+        norm2 = layer.batch_norm(input=conv)
+        norm3 = layer.sum_to_one_norm(input=conv)
+        print layer.parse_network([norm1, norm2, norm3])
+
+
+class AggregateLayerTest(unittest.TestCase):
+    def test_aggregate_layer(self):
+        pool = layer.pooling(
+            input=pixel,
+            pooling_type=pooling.Avg(),
+            agg_level=layer.AggregateLevel.EACH_SEQUENCE)
+        last_seq = layer.last_seq(input=pixel)
+        first_seq = layer.first_seq(input=pixel)
+        concat = layer.concat(input=[last_seq, first_seq])
+        seq_concat = layer.seq_concat(a=last_seq, b=first_seq)
+        print layer.parse_network(
+            [pool, last_seq, first_seq, concat, seq_concat])
+
+
+class MathLayerTest(unittest.TestCase):
+    def test_math_layer(self):
+        addto = layer.addto(input=[pixel, pixel])
+        linear_comb = layer.linear_comb(
+            weights=combine_weight, vectors=hidden, size=10)
+        interpolation = layer.interpolation(
+            input=[hidden, hidden], weight=score)
+        bilinear = layer.bilinear_interp(input=conv, out_size_x=4, out_size_y=4)
+        power = layer.power(input=pixel, weight=score)
+        scaling = layer.scaling(input=pixel, weight=score)
+        slope = layer.slope_intercept(input=pixel)
+        tensor = layer.tensor(a=pixel, b=pixel, size=1000)
+        cos_sim = layer.cos_sim(a=pixel, b=pixel)
+        trans = layer.trans(input=tensor)
+        print layer.parse_network([
+            addto, linear_comb, interpolation, power, scaling, slope, tensor,
+            cos_sim, trans
+        ])
+
+
+class ReshapeLayerTest(unittest.TestCase):
+    def test_reshape_layer(self):
+        block_expand = layer.block_expand(
+            input=conv, num_channels=4, stride_x=1, block_x=1)
+        expand = layer.expand(
+            input=weight,
+            expand_as=pixel,
+            expand_level=layer.ExpandLevel.FROM_TIMESTEP)
+        repeat = layer.repeat(input=pixel, num_repeats=4)
+        reshape = layer.seq_reshape(input=pixel, reshape_size=4)
+        rotate = layer.rotate(input=pixel, height=16, width=49)
+        print layer.parse_network(
+            [block_expand, expand, repeat, reshape, rotate])
+
+
+class RecurrentLayerTest(unittest.TestCase):
+    def test_recurrent_layer(self):
+        word = layer.data(name='word', type=data_type.integer_value(12))
+        recurrent = layer.recurrent(input=word)
+        lstm = layer.lstmemory(input=word)
+        gru = layer.grumemory(input=word)
+        print layer.parse_network([recurrent, lstm, gru])
 
 
 class CostLayerTest(unittest.TestCase):
@@ -41,9 +134,8 @@ class CostLayerTest(unittest.TestCase):
         cost3 = layer.cross_entropy_cost(input=inference, label=label)
         cost4 = layer.cross_entropy_with_selfnorm_cost(
             input=inference, label=label)
-        cost5 = layer.regression_cost(input=inference, label=label)
-        cost6 = layer.regression_cost(
-            input=inference, label=label, weight=weight)
+        cost5 = layer.mse_cost(input=inference, label=label)
+        cost6 = layer.mse_cost(input=inference, label=label, weight=weight)
         cost7 = layer.multi_binary_label_cross_entropy_cost(
             input=inference, label=label)
         cost8 = layer.rank_cost(left=score, right=score, label=score)
@@ -51,12 +143,143 @@ class CostLayerTest(unittest.TestCase):
         cost10 = layer.sum_cost(input=inference)
         cost11 = layer.huber_cost(input=score, label=label)
 
-        print dir(layer)
-        layer.parse_network(cost1, cost2)
-        print dir(layer)
-        #print layer.parse_network(cost3, cost4)
-        #print layer.parse_network(cost5, cost6)
-        #print layer.parse_network(cost7, cost8, cost9, cost10, cost11)
+        print layer.parse_network([cost1, cost2])
+        print layer.parse_network([cost3, cost4])
+        print layer.parse_network([cost5, cost6])
+        print layer.parse_network([cost7, cost8, cost9, cost10, cost11])
+
+        crf = layer.crf(input=inference, label=label)
+        crf_decoding = layer.crf_decoding(input=inference, size=3)
+        ctc = layer.ctc(input=inference, label=label)
+        warp_ctc = layer.warp_ctc(input=pixel, label=label)
+        nce = layer.nce(input=inference, label=label, num_classes=3)
+        hsigmoid = layer.hsigmoid(input=inference, label=label, num_classes=3)
+
+        print layer.parse_network(
+            [crf, crf_decoding, ctc, warp_ctc, nce, hsigmoid])
+
+
+class OtherLayerTest(unittest.TestCase):
+    def test_sampling_layer(self):
+        maxid = layer.max_id(input=inference)
+        sampling_id = layer.sampling_id(input=inference)
+        eos = layer.eos(input=maxid, eos_id=5)
+        print layer.parse_network([maxid, sampling_id, eos])
+
+    def test_slicing_joining_layer(self):
+        pad = layer.pad(input=conv, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
+        print layer.parse_network(pad)
+
+
+class ProjOpTest(unittest.TestCase):
+    def test_projection(self):
+        input = layer.data(name='data', type=data_type.dense_vector(784))
+        word = layer.data(
+            name='word', type=data_type.integer_value_sequence(10000))
+        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
+        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
+        mixed0 = layer.mixed(
+            size=256,
+            input=[
+                layer.full_matrix_projection(input=fc0),
+                layer.full_matrix_projection(input=fc1)
+            ])
+        with layer.mixed(size=200) as mixed1:
+            mixed1 += layer.full_matrix_projection(input=fc0)
+            mixed1 += layer.identity_projection(input=fc1)
+
+        table = layer.table_projection(input=word)
+        emb0 = layer.mixed(size=512, input=table)
+        with layer.mixed(size=512) as emb1:
+            emb1 += table
+
+        scale = layer.scaling_projection(input=fc0)
+        scale0 = layer.mixed(size=100, input=scale)
+        with layer.mixed(size=100) as scale1:
+            scale1 += scale
+
+        dotmul = layer.dotmul_projection(input=fc0)
+        dotmul0 = layer.mixed(size=100, input=dotmul)
+        with layer.mixed(size=100) as dotmul1:
+            dotmul1 += dotmul
+
+        context = layer.context_projection(input=fc0, context_len=5)
+        context0 = layer.mixed(size=100, input=context)
+        with layer.mixed(size=100) as context1:
+            context1 += context
+
+        conv = layer.conv_projection(
+            input=input,
+            filter_size=1,
+            num_channels=1,
+            num_filters=128,
+            stride=1,
+            padding=0)
+        conv0 = layer.mixed(input=conv, bias_attr=True)
+        with layer.mixed(bias_attr=True) as conv1:
+            conv1 += conv
+
+        print layer.parse_network(mixed0)
+        print layer.parse_network(mixed1)
+        print layer.parse_network(emb0)
+        print layer.parse_network(emb1)
+        print layer.parse_network(scale0)
+        print layer.parse_network(scale1)
+        print layer.parse_network(dotmul0)
+        print layer.parse_network(dotmul1)
+        print layer.parse_network(conv0)
+        print layer.parse_network(conv1)
+
+    def test_operator(self):
+        ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
+        ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
+        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
+        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
+
+        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
+        dotmul0 = layer.mixed(input=dotmul_op)
+        with layer.mixed() as dotmul1:
+            dotmul1 += dotmul_op
+
+        conv = layer.conv_operator(
+            img=ipt0,
+            filter=ipt1,
+            filter_size=1,
+            num_channels=1,
+            num_filters=128,
+            stride=1,
+            padding=0)
+        conv0 = layer.mixed(input=conv)
+        with layer.mixed() as conv1:
+            conv1 += conv
+
+        print layer.parse_network(dotmul0)
+        print layer.parse_network(dotmul1)
+        print layer.parse_network(conv0)
+        print layer.parse_network(conv1)
+
+
+class NetworkTests(unittest.TestCase):
+    def test_vgg(self):
+        img = layer.data(name='pixel', type=data_type.dense_vector(784))
+        vgg_out = networks.small_vgg(
+            input_image=img, num_channels=1, num_classes=2)
+        print layer.parse_network(vgg_out)
+
+
+class EvaluatorTest(unittest.TestCase):
+    def test_evaluator(self):
+        img = layer.data(name='pixel', type=data_type.dense_vector(784))
+        output = layer.fc(input=img,
+                          size=10,
+                          act=activation.Softmax(),
+                          name='fc_here')
+        lbl = layer.data(name='label', type=data_type.integer_value(10))
+        cost = layer.cross_entropy_cost(input=output, label=lbl)
+
+        evaluator.classification_error(input=output, label=lbl)
+        print layer.parse_network(cost)
+        print layer.parse_network(output)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/v2/tests/test_parameters.py b/python/paddle/v2/tests/test_parameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebb182caab6430862a8e4da2ae4ea6b1e72f726c
--- /dev/null
+++ b/python/paddle/v2/tests/test_parameters.py
@@ -0,0 +1,60 @@
+import unittest
+import sys
+
+try:
+    import py_paddle
+
+    del py_paddle
+except ImportError:
+    print >> sys.stderr, "It seems the SWIG module of Paddle is not " \
+                         "installed; this unittest will not be run."
+    sys.exit(0)
+
+import paddle.v2.parameters as parameters
+from paddle.proto.ParameterConfig_pb2 import ParameterConfig
+import random
+import cStringIO
+import numpy
+
+
+def __rand_param_config__(name):
+    conf = ParameterConfig()
+    conf.name = name
+    size = 1
+    for i in xrange(2):
+        dim = random.randint(1, 1000)
+        conf.dims.append(dim)
+        size *= dim
+    conf.size = size
+    assert conf.IsInitialized()
+    return conf
+
+
+class TestParameters(unittest.TestCase):
+    def test_serialization(self):
+        params = parameters.Parameters()
+        params.__append_config__(__rand_param_config__("param_0"))
+        params.__append_config__(__rand_param_config__("param_1"))
+
+        for name in params.names():
+            param = params.get(name)
+            param[:] = numpy.random.uniform(
+                -1.0, 1.0, size=params.get_shape(name))
+            params.set(name, param)
+
+        tmp_file = cStringIO.StringIO()
+        params.to_tar(tmp_file)
+        tmp_file.seek(0)
+        params_dup = parameters.Parameters.from_tar(tmp_file)
+
+        self.assertEqual(params_dup.names(), params.names())
+
+        for name in params.names():
+            self.assertEqual(params.get_shape(name), params_dup.get_shape(name))
+            p0 = params.get(name)
+            p1 = params_dup.get(name)
+            self.assertTrue(numpy.isclose(p0, p1).all())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/tests/test_rnn_layer.py b/python/paddle/v2/tests/test_rnn_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fbbd20eb76bb9daab2bcf98c4adad989106a377
--- /dev/null
+++ b/python/paddle/v2/tests/test_rnn_layer.py
@@ -0,0 +1,155 @@
+# Copyright PaddlePaddle contributors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import difflib
+import unittest
+
+import paddle.trainer_config_helpers as conf_helps
+import paddle.v2.activation as activation
+import paddle.v2.data_type as data_type
+import paddle.v2.layer as layer
+from paddle.trainer_config_helpers.config_parser_utils import \
+    parse_network_config as parse_network
+
+
+class RNNTest(unittest.TestCase):
+    def test_simple_rnn(self):
+        dict_dim = 10
+        word_dim = 8
+        hidden_dim = 8
+
+        def parse_old_rnn():
+            def step(y):
+                mem = conf_helps.memory(name="rnn_state", size=hidden_dim)
+                out = conf_helps.fc_layer(
+                    input=[y, mem],
+                    size=hidden_dim,
+                    act=activation.Tanh(),
+                    bias_attr=True,
+                    name="rnn_state")
+                return out
+
+            def test():
+                data = conf_helps.data_layer(name="word", size=dict_dim)
+                embd = conf_helps.embedding_layer(input=data, size=word_dim)
+                conf_helps.recurrent_group(name="rnn", step=step, input=embd)
+
+            return str(parse_network(test))
+
+        def parse_new_rnn():
+            def new_step(y):
+                mem = layer.memory(name="rnn_state", size=hidden_dim)
+                out = layer.fc(input=[y, mem],
+                               size=hidden_dim,
+                               act=activation.Tanh(),
+                               bias_attr=True,
+                               name="rnn_state")
+                return out
+
+            data = layer.data(
+                name="word", type=data_type.integer_value(dict_dim))
+            embd = layer.embedding(input=data, size=word_dim)
+            rnn_layer = layer.recurrent_group(
+                name="rnn", step=new_step, input=embd)
+            return str(layer.parse_network(rnn_layer))
+
+        diff = difflib.unified_diff(parse_old_rnn().splitlines(1),
+                                    parse_new_rnn().splitlines(1))
+        print ''.join(diff)
+
+    def test_sequence_rnn_multi_input(self):
+        dict_dim = 10
+        word_dim = 8
+        hidden_dim = 8
+        label_dim = 3
+
+        def parse_old_rnn():
+            def test():
+                data = conf_helps.data_layer(name="word", size=dict_dim)
+                label = conf_helps.data_layer(name="label", size=label_dim)
+                emb = conf_helps.embedding_layer(input=data, size=word_dim)
+                boot_layer = conf_helps.data_layer(name="boot", size=10)
+                boot_layer = conf_helps.fc_layer(
+                    name='boot_fc', input=boot_layer, size=10)
+
+                def step(y, wid):
+                    z = conf_helps.embedding_layer(input=wid, size=word_dim)
+                    mem = conf_helps.memory(
+                        name="rnn_state",
+                        size=hidden_dim,
+                        boot_layer=boot_layer)
+                    out = conf_helps.fc_layer(
+                        input=[y, z, mem],
+                        size=hidden_dim,
+                        act=conf_helps.TanhActivation(),
+                        bias_attr=True,
+                        name="rnn_state")
+                    return out
+
+                out = conf_helps.recurrent_group(
+                    name="rnn", step=step, input=[emb, data])
+
+                rep = conf_helps.last_seq(input=out)
+                prob = conf_helps.fc_layer(
+                    size=label_dim,
+                    input=rep,
+                    act=conf_helps.SoftmaxActivation(),
+                    bias_attr=True)
+
+                conf_helps.outputs(
+                    conf_helps.classification_cost(
+                        input=prob, label=label))
+
+            return str(parse_network(test))
+
+        def parse_new_rnn():
+            data = layer.data(
+                name="word", type=data_type.dense_vector(dict_dim))
+            label = layer.data(
+                name="label", type=data_type.dense_vector(label_dim))
+            emb = layer.embedding(input=data, size=word_dim)
+            boot_layer = layer.data(
+                name="boot", type=data_type.dense_vector(10))
+            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)
+
+            def step(y, wid):
+                z = layer.embedding(input=wid, size=word_dim)
+                mem = layer.memory(
+                    name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
+                out = layer.fc(input=[y, z, mem],
+                               size=hidden_dim,
+                               act=activation.Tanh(),
+                               bias_attr=True,
+                               name="rnn_state")
+                return out
+
+            out = layer.recurrent_group(
+                name="rnn", step=step, input=[emb, data])
+
+            rep = layer.last_seq(input=out)
+            prob = layer.fc(size=label_dim,
+                            input=rep,
+                            act=activation.Softmax(),
+                            bias_attr=True)
+
+            cost = layer.classification_cost(input=prob, label=label)
+
+            return str(layer.parse_network(cost))
+
+        diff = difflib.unified_diff(parse_old_rnn().splitlines(1),
+                                    parse_new_rnn().splitlines(1))
+        print ''.join(diff)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/tests/test_topology.py b/python/paddle/v2/tests/test_topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c6dbcdb4f49b960fb8b71aecbad4f013d2cd283
--- /dev/null
+++ b/python/paddle/v2/tests/test_topology.py
@@ -0,0 +1,84 @@
+# Copyright PaddlePaddle contributors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+import paddle.v2.layer as layer
+import paddle.v2.topology as topology
+import paddle.v2.data_type as data_type
+import paddle.trainer_config_helpers as conf_helps
+import paddle.trainer.PyDataProvider2 as pydp2
+
+
+class TestTopology(unittest.TestCase):
+    def test_data_type(self):
+        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
+        label = layer.data(name='label', type=data_type.integer_value(10))
+        hidden = layer.fc(input=pixel,
+                          size=100,
+                          act=conf_helps.SigmoidActivation())
+        inference = layer.fc(input=hidden,
+                             size=10,
+                             act=conf_helps.SoftmaxActivation())
+        cost = layer.classification_cost(input=inference, label=label)
+        topo = topology.Topology(cost)
+        data_types = topo.data_type()
+        self.assertEqual(len(data_types), 2)
+        pixel_data_type = filter(lambda type: type[0] == "pixel", data_types)
+        self.assertEqual(len(pixel_data_type), 1)
+        pixel_data_type = pixel_data_type[0]
+        self.assertEqual(pixel_data_type[1].type, pydp2.DataType.Dense)
+        self.assertEqual(pixel_data_type[1].dim, 784)
+
+        label_data_type = filter(lambda type: type[0] == "label", data_types)
+        self.assertEqual(len(label_data_type), 1)
+        label_data_type = label_data_type[0]
+        self.assertEqual(label_data_type[1].type, pydp2.DataType.Index)
+        self.assertEqual(label_data_type[1].dim, 10)
+
+    def test_get_layer(self):
+        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
+        label = layer.data(name='label', type=data_type.integer_value(10))
+        hidden = layer.fc(input=pixel,
+                          size=100,
+                          act=conf_helps.SigmoidActivation())
+        inference = layer.fc(input=hidden,
+                             size=10,
+                             act=conf_helps.SoftmaxActivation())
+        cost = layer.classification_cost(input=inference, label=label)
+        topo = topology.Topology(cost)
+        pixel_layer = topo.get_layer("pixel")
+        label_layer = topo.get_layer("label")
+        self.assertEqual(pixel_layer, pixel)
+        self.assertEqual(label_layer, label)
+
+    def test_parse(self):
+        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
+        label = layer.data(name='label', type=data_type.integer_value(10))
+        hidden = layer.fc(input=pixel,
+                          size=100,
+                          act=conf_helps.SigmoidActivation())
+        inference = layer.fc(input=hidden,
+                             size=10,
+                             act=conf_helps.SoftmaxActivation())
+        maxid = layer.max_id(input=inference)
+        cost1 = layer.classification_cost(input=inference, label=label)
+        cost2 = layer.cross_entropy_cost(input=inference, label=label)
+
+        topology.Topology(cost2).proto()
+        topology.Topology([cost1]).proto()
+        topology.Topology([cost1, cost2]).proto()
+        topology.Topology([inference, maxid]).proto()
+
+
+if __name__ == '__main__':
+    unittest.main()
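
As the assertions above rely on, Topology.data_type() returns (name, type) pairs in the order the parsed proto lists its input layers. A minimal sketch of a more direct lookup, reusing the tiny classifier from the tests; only the dict conversion is new here:

    import paddle.v2.layer as layer
    import paddle.v2.data_type as data_type
    import paddle.v2.topology as topology

    pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
    label = layer.data(name='label', type=data_type.integer_value(10))
    hidden = layer.fc(input=pixel, size=100)
    inference = layer.fc(input=hidden, size=10)
    cost = layer.classification_cost(input=inference, label=label)

    # data_type() yields (name, type) pairs; a dict makes per-name lookup direct.
    types = dict(topology.Topology(cost).data_type())
    assert types['pixel'].dim == 784
    assert types['label'].dim == 10
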
diff --git a/python/paddle/v2/topology.py b/python/paddle/v2/topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..737b6bf1e2eb60281d4d6e92667d9fe91e243704
--- /dev/null
+++ b/python/paddle/v2/topology.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+from paddle.proto.ModelConfig_pb2 import ModelConfig
+
+import layer as v2_layer
+
+__all__ = ['Topology']
+
+
+def __flatten__(lis):
+    """
+    Given a list, possibly nested to any level, return it flattened.
+    """
+    new_lis = []
+    for item in lis:
+        if isinstance(item, collections.Sequence):
+            new_lis.extend(__flatten__(item))
+        else:
+            new_lis.append(item)
+    return new_lis
+
+
+def __bfs_travel__(callback, *layers):
+    layers = __flatten__(layers)
+    for each_layer in layers:
+        __break__ = callback(each_layer)
+        if __break__:
+            return
+        __layers__ = each_layer.__parent_layers__.values() + \
+                     each_layer.extra_parent()
+        __bfs_travel__(callback, *__layers__)
+
+
+class Topology(object):
+    """
+    Topology is used to store the information about all layers
+    and the network config.
+    """
+
+    def __init__(self, layers, extra_layers=None):
+        def __check__(layers):
+            if not isinstance(layers, collections.Sequence):
+                __check_layer_type__(layers)
+                layers = [layers]
+            for layer in layers:
+                __check_layer_type__(layer)
+            return layers
+
+        layers = __check__(layers)
+        self.layers = layers
+        if extra_layers is not None:
+            extra_layers = __check__(extra_layers)
+
+        self.__model_config__ = v2_layer.parse_network(
+            layers, extra_layers=extra_layers)
+
+        if extra_layers is not None:
+            self.layers.extend(extra_layers)
+
+        assert isinstance(self.__model_config__, ModelConfig)
+
+    def proto(self):
+        return self.__model_config__
+
+    def get_layer(self, name):
+        """
+        Get the v2.Layer instance in this topology by layer name.
+        :param name: layer name
+        :return: the layer with the given name
+        """
+        result_layer = [None]
+
+        def __impl__(l):
+            if l.name == name:
+                result_layer[0] = l
+                return True  # break
+            return False
+
+        __bfs_travel__(__impl__, *self.layers)
+        if result_layer[0] is None:
+            raise ValueError("No such layer %s" % name)
+        return result_layer[0]
+
+    def data_layers(self):
+        """
+        Get all data layers in this topology.
+        :return: a dict from layer name to data layer
+        """
+        data_layers = dict()
+
+        def __impl__(l):
+            if isinstance(l, v2_layer.DataLayerV2):
+                data_layers[l.name] = l
+
+        __bfs_travel__(__impl__, *self.layers)
+        return data_layers
+
+    def data_type(self):
+        """
+        Get each input layer's name and data type from the parsed proto, e.g.:
+        [('image', dense_vector(768)), ('label', integer_value(10))]
+        """
+        data_layers = self.data_layers()
+        return [(nm, data_layers[nm].type)
+                for nm in self.proto().input_layer_names]
+
+
+def __check_layer_type__(layer):
+    if not isinstance(layer, v2_layer.LayerV2):
+        raise ValueError('layer should have type paddle.layer.Layer')
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index 4365bd41e7073bce4112e5813dbf1517856c06f5..68b4967cc031dfa2dd164d822aff97585f923e48 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -1,15 +1,17 @@
+"""
+Module Trainer
+"""
 import collections
 
 import py_paddle.swig_paddle as api
-from paddle.proto.ModelConfig_pb2 import ModelConfig
-from py_paddle import DataProviderConverter
 
+from data_feeder import DataFeeder
+from topology import Topology
 from . import event as v2_event
-from . import layer as v2_layer
 from . import optimizer as v2_optimizer
 from . import parameters as v2_parameters
 
-__all__ = ['ITrainer', 'SGD']
+__all__ = ['SGD']
 
 
 def default_event_handler(event):
@@ -23,166 +25,149 @@ def default_event_handler(event):
     pass
 
 
-class ITrainer(object):
+class SGD(object):
     """
-    The interface of Trainer. The only exposed method is `train`.
+    Simple SGD Trainer.
+    SGD Trainer combines the data reader, network topology and update_equation
+    to train/test a neural network.
+
+    :param update_equation: The optimizer object.
+    :type update_equation: paddle.v2.optimizer.Optimizer
+    :param cost: The cost layer that the neural network should optimize.
+    :type cost: paddle.v2.config_base.Layer
+    :param parameters: The parameters dictionary.
+    :type parameters: paddle.v2.parameters.Parameters
+    :param extra_layers: Layers in the neural network graph that are not
+                         on the path of the cost layer.
+    :type extra_layers: paddle.v2.config_base.Layer
     """
 
-    def train(self,
-              train_data_reader,
-              topology,
-              parameters,
-              test_data_reader=None,
-              event_handler=None):
-        """
-        train method.
-
-        :param train_data_reader:
-        :param topology:
-        :param parameters:
-        :param test_data_reader:
-        :param event_handler:
-        :return:
-        """
-
-        raise NotImplementedError()
+    def __init__(self, cost, parameters, update_equation, extra_layers=None):
 
+        if not isinstance(parameters, v2_parameters.Parameters):
+            raise TypeError('parameters should be '
+                            'paddle.v2.parameters.Parameters')
 
-class SGD(ITrainer):
-    def __init__(self, update_equation):
-        """
-        Simple SGD Trainer.
-
-        :param update_equation: The optimizer object.
-        :type update_equation: v2_optimizer.Optimizer
-        """
         if not isinstance(update_equation, v2_optimizer.Optimizer):
-            raise ValueError("update equation parameter must be "
-                             "paddle.v2.optimizer.Optimizer")
+            raise TypeError("update equation parameter must be "
+                            "paddle.v2.optimizer.Optimizer")
+        topology = Topology(cost, extra_layers=extra_layers)
         self.__optimizer__ = update_equation
+        self.__topology__ = topology
+        self.__parameters__ = parameters
+        self.__topology_in_proto__ = topology.proto()
+
+        # In local mode, disable sparse_remote_update.
+        for param in self.__topology_in_proto__.parameters:
+            if param.sparse_remote_update:
+                param.sparse_remote_update = False
+
+        self.__data_types__ = topology.data_type()
+        gm = api.GradientMachine.createFromConfigProto(
+            self.__topology_in_proto__, api.CREATE_MODE_NORMAL,
+            self.__optimizer__.enable_types())
+        assert isinstance(gm, api.GradientMachine)
+        self.__gradient_machine__ = gm
+        self.__gradient_machine__.randParameters()
+        parameters.append_gradient_machine(gm)
 
-    def train(self,
-              train_data_reader,
-              topology,
-              parameters,
-              num_passes=1,
-              test_data_reader=None,
-              event_handler=None,
-              batch_size=32,
-              data_types=None):
+    def train(self, reader, num_passes=1, event_handler=None, feeding=None):
         """
         Training method. Will train num_passes of input data.
 
-        :param train_data_reader:
-        :param topology: Network Topology, use one or more Layers to represent it.
-        :param parameters: The parameter pools.
+        :param reader: A reader that reads and yields data items. Usually we use a
+                       batched reader to do mini-batch training.
+        :type reader: collections.Iterable
         :param num_passes: The total train passes.
-        :param test_data_reader:
         :param event_handler: Event handler. A method will be invoked when event
                               occurred.
         :type event_handler: (BaseEvent) => None
-        :param batch_size: Not important, will be removed after data refactor.
-        :param data_types: Not important, will be removed after data refactor.
+        :param feeding: A map from neural network input names to the indices
+                        of the corresponding items in each reader data row.
+        :type feeding: dict|list
         :return:
         """
         if event_handler is None:
             event_handler = default_event_handler
-
-        topology = v2_layer.parse_network(topology)
-
         __check_train_args__(**locals())
 
-        gm = api.GradientMachine.createFromConfigProto(
-            topology, api.CREATE_MODE_NORMAL, self.__optimizer__.enable_types())
-        assert isinstance(gm, api.GradientMachine)
-        parameters.append_gradient_machine(gm)
-
         updater = self.__optimizer__.create_local_updater()
-        updater.init(gm)
+        updater.init(self.__gradient_machine__)
 
-        gm.start()
+        self.__gradient_machine__.start()
+        batch_evaluator = self.__gradient_machine__.makeEvaluator()
+        assert isinstance(batch_evaluator, api.Evaluator)
+        pass_evaluator = self.__gradient_machine__.makeEvaluator()
+        assert isinstance(pass_evaluator, api.Evaluator)
         out_args = api.Arguments.createArguments(0)
-
-        data_types_lists = []
-        for each in topology.input_layer_names:
-            if each not in data_types:
-                raise ValueError()
-            data_types_lists.append(data_types[each])
-
-        converter = DataProviderConverter(input_types=data_types_lists)
-
+        feeder = DataFeeder(self.__data_types__, feeding)
         for pass_id in xrange(num_passes):
+            event_handler(v2_event.BeginPass(pass_id))
+            pass_evaluator.start()
             updater.startPass()
-            for batch_id, data_batch in enumerate(
-                    __data_reader_to_batch__(train_data_reader, batch_size,
-                                             topology)):
+            for batch_id, data_batch in enumerate(reader()):
+                batch_evaluator.start()
+                event_handler(
+                    v2_event.BeginIteration(
+                        pass_id=pass_id, batch_id=batch_id))
                 pass_type = updater.startBatch(len(data_batch))
-                gm.forwardBackward(converter(data_batch), out_args, pass_type)
-                for each_param in gm.getParameters():
+                self.__gradient_machine__.forwardBackward(
+                    feeder(data_batch), out_args, pass_type)
+                self.__gradient_machine__.eval(pass_evaluator)
+                self.__gradient_machine__.eval(batch_evaluator)
+                for each_param in self.__gradient_machine__.getNonStaticParameters(
+                ):
                     updater.update(each_param)
-                # Get cost. We use numpy to calculate total cost for this batch.
-                cost_vec = out_args.getSlotValue(0)
-                cost_vec = cost_vec.copyToNumpyMat()
-                cost = cost_vec.sum() / len(data_batch)
+                cost_sum = out_args.sum()
+                cost = cost_sum / len(data_batch)
                 updater.finishBatch(cost)
+                batch_evaluator.finish()
                 event_handler(
                     v2_event.EndIteration(
-                        pass_id=pass_id, batch_id=batch_id, cost=cost))
+                        pass_id=pass_id,
+                        batch_id=batch_id,
+                        cost=cost,
+                        evaluator=batch_evaluator))
 
             updater.finishPass()
-        gm.finish()
-
-
-def __data_reader_to_batch__(reader, batch_size, topology):
-    """
-    This function is not important, and will be removed when data refactored.
-    """
-
-    def input_reorder(func):
-        for item in func():
-            retv = []
-            for __layer_name__ in topology.input_layer_names:
-                retv.append(item[__layer_name__])
-            yield retv
-
-    return __generator_to_batch__(input_reorder(reader), batch_size=batch_size)
+            pass_evaluator.finish()
+            event_handler(v2_event.EndPass(pass_id, evaluator=pass_evaluator))
+        self.__gradient_machine__.finish()
 
+    def test(self, reader, feeding=None):
+        """
+        Testing method. Evaluates the network on the input data.
 
-def __generator_to_batch__(generator, batch_size):
-    """
-    This function is not important, and will be removed when data refactored.
-    """
-    ret_val = list()
-    for each_item in generator:
-        ret_val.append(each_item)
-        if len(ret_val) == batch_size:
-            yield ret_val
-            ret_val = list()
-    if len(ret_val) != 0:
-        yield ret_val
-
-
-def __check_train_args__(train_data_reader, topology, parameters,
-                         test_data_reader, event_handler, **kwargs):
+        :param reader: A reader that reads and yields data items.
+        :type reader: collections.Iterable
+        :param feeding: A map from neural network input names to the indices
+                        of the corresponding items in each reader data row.
+        :type feeding: dict
+        :return:
+        """
+        feeder = DataFeeder(self.__data_types__, feeding)
+        evaluator = self.__gradient_machine__.makeEvaluator()
+        out_args = api.Arguments.createArguments(0)
+        evaluator.start()
+        total_cost = 0
+        num_samples = 0.0
+        for data_batch in reader():
+            num_samples += len(data_batch)
+            self.__gradient_machine__.forward(
+                feeder(data_batch), out_args, api.PASS_TEST)
+            total_cost += out_args.sum()
+            self.__gradient_machine__.eval(evaluator)
+
+        evaluator.finish()
+        return v2_event.TestResult(
+            evaluator=evaluator, cost=total_cost / num_samples)
+
+
+def __check_train_args__(reader, event_handler, **kwargs):
     """
     Check train function's argument types
     """
-    if not callable(train_data_reader) or not isinstance(train_data_reader(),
-                                                         collections.Iterator):
-        raise ValueError('train_data_reader should be a function, '
-                         'which can return a iterator')
-
-    if test_data_reader is not None:
-        if not callable(test_data_reader) or not isinstance(
-                test_data_reader(), collections.Iterator):
-            raise ValueError('test_data_reader should be a function, which can '
-                             'return a iterator')
-
-    if not isinstance(topology, ModelConfig):
-        raise ValueError('topology should be a model config')
-
-    if not isinstance(parameters, v2_parameters.Parameters):
-        raise ValueError('parameters should be a parameter pool')
-
+    if not callable(reader) or not isinstance(reader(), collections.Iterator):
+        raise TypeError('reader should be a function '
+                        'that returns an iterator')
     if not callable(event_handler):
-        raise ValueError('event handler should be a function')
+        raise TypeError('event handler should be a function')
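Taken together, the new API configures the trainer once from cost, parameters and update_equation, and train() needs only a batched reader plus an optional feeding map (input layer name to index into each reader item). A rough end-to-end sketch under these assumptions: my_reader is a hypothetical reader yielding (image, label) tuples, and helper names such as paddle.init, paddle.batch and paddle.parameters.create are taken from the v2 API of this era and may differ at this exact commit:

    import paddle.v2 as paddle

    paddle.init(use_gpu=False, trainer_count=1)

    image = paddle.layer.data(name='image',
                              type=paddle.data_type.dense_vector(784))
    label = paddle.layer.data(name='label',
                              type=paddle.data_type.integer_value(10))
    inference = paddle.layer.fc(input=image, size=10,
                                act=paddle.activation.Softmax())
    cost = paddle.layer.classification_cost(input=inference, label=label)

    parameters = paddle.parameters.create(cost)
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=paddle.optimizer.Adam())

    def event_handler(event):
        # EndIteration now also carries the batch evaluator (see above).
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "pass %d, batch %d, cost %f" % (
                    event.pass_id, event.batch_id, event.cost)

    # feeding maps each input layer name to its position in a reader item.
    trainer.train(reader=paddle.batch(my_reader, batch_size=32),
                  num_passes=2,
                  event_handler=event_handler,
                  feeding={'image': 0, 'label': 1})
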
diff --git a/python/setup.py.in b/python/setup.py.in
index 1e1324eea825ab1945a38cb43eceec29a4ebb1a1..4ac35e3b8d6049a9024a6e0c9bb6804900f82197 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -5,7 +5,10 @@ packages=['paddle',
           'paddle.trainer',
           'paddle.trainer_config_helpers',
           'paddle.utils',
-          'paddle.v2']
+          'paddle.v2',
+          'paddle.v2.dataset',
+          'paddle.v2.reader',
+          'paddle.v2.plot']
 
 setup(name='paddle',
       version='${PADDLE_VERSION}',
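
With the three new entries above, the dataset, reader and plot subpackages ship in the built distribution, so after installation they can be imported directly:

    import paddle.v2.dataset
    import paddle.v2.reader
    import paddle.v2.plot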