diff --git a/.gitignore b/.gitignore
index 9622ab78e0e0556ec2b4cc974fee93ff680d54d2..4f21fefda9f64a0392881971a715b97c234030e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,6 +22,7 @@ cmake-build-*
# generated while compiling
python/paddle/v2/framework/core.so
+paddle/pybind/pybind.h
CMakeFiles
cmake_install.cmake
paddle/.timestamp
diff --git a/.travis.yml b/.travis.yml
index b4b83fcdbc84ce0fb0c91c816ebc3c964acfa590..e217c8f5a740ef5ab7315656ed7839ffa219c805 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,6 @@ cache:
- $HOME/.ccache
- $HOME/.cache/pip
- $TRAVIS_BUILD_DIR/build/third_party
- - $TRAVIS_BUILD_DIR/build_android/third_party
sudo: required
dist: trusty
os:
@@ -12,7 +11,6 @@ os:
env:
- JOB=build_doc
- JOB=check_style
- - JOB=build_android
addons:
apt:
packages:
@@ -23,7 +21,6 @@ addons:
- python
- python-pip
- python2.7-dev
- - python-numpy
- python-wheel
- libboost-dev
- curl
@@ -37,8 +34,8 @@ before_install:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
# protobuf version.
- - pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- - pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
+ - sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
+ - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- curl https://glide.sh/get | bash
- eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- go get -u github.com/alecthomas/gometalinter
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ad559672ad2f83a3d62cdf332b47c6cf1e730f70..5739c2a26039426ab544f762e9401445f01e7de7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -65,8 +65,11 @@ if(NOT CMAKE_BUILD_TYPE)
endif()
if(ANDROID)
- if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
- message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 21")
+ if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
+ message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
+ elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
+ # TODO: support glog for Android api 16 ~ 19 in the future
+ message(WARNING "Using the unofficial git repository instead")
endif()
set(WITH_GPU OFF CACHE STRING
diff --git a/Dockerfile.android b/Dockerfile.android
index c0fa58c384f9ebcae60477ffce49ea4ffa929db9..9d13a414f67be04e17b7d83403228d92bce0eda9 100644
--- a/Dockerfile.android
+++ b/Dockerfile.android
@@ -4,9 +4,16 @@ MAINTAINER PaddlePaddle Authors
ARG UBUNTU_MIRROR
RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
+# ENV variables
+ARG ANDROID_ABI
+ARG ANDROID_API
+
+ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"}
+ENV ANDROID_API=${ANDROID_API:-21}
+
ENV HOME=/root \
ANDROID_NDK_HOME=/opt/android-ndk-linux \
- ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain-gcc
+ ANDROID_TOOLCHAINS_DIR=/opt/toolchains
RUN apt-get update && \
apt-get install -y \
@@ -15,12 +22,11 @@ RUN apt-get update && \
apt-get clean -y
# Install Go and glide
-RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \
- tar -C /usr/local -xzf go.tgz && \
+RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
+ tar -xz -C /usr/local && \
mkdir /root/gopath && \
mkdir /root/gopath/bin && \
- mkdir /root/gopath/src && \
- rm go.tgz
+ mkdir /root/gopath/src
ENV GOROOT=/usr/local/go GOPATH=/root/gopath
# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
@@ -37,13 +43,12 @@ RUN pip install --upgrade pip && \
pip install pre-commit
# Android NDK
-RUN mkdir /opt/android-ndk-tmp && \
+RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \
+ mkdir -p /opt/android-ndk-tmp && \
cd /opt/android-ndk-tmp && \
wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \
unzip -q android-ndk-r14b-linux-x86_64.zip && \
mv android-ndk-r14b ${ANDROID_NDK_HOME} && \
- ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-21 --install-dir=${ANDROID_STANDALONE_TOOLCHAIN} && \
- rm -rf /opt/android-ndk-tmp && \
- rm -rf ${ANDROID_NDK_HOME}
+ rm -rf /opt/android-ndk-tmp
CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"]
diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index 8d5d533126c9b7fa84c725d614cf3486126d0284..4823dc3e91390002aefac70f7931b4197db05789 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -26,9 +26,9 @@ set(IGNORE_PATTERN
.*ImportanceSampler.*
.*cblas\\.h.*
.*\\.pb\\.txt
- .*LtrDataProvider.*
.*MultiDataProvider.*
- .*pb.*)
+ .*pb.*
+ .*pybind.h)
# add_style_check_target
#
diff --git a/cmake/cross_compiling/android.cmake b/cmake/cross_compiling/android.cmake
index 5e3e437a8da9624df35a5c754fe77be73f20361d..84219cfa5587f5b765b2e8f35180797d7053169f 100644
--- a/cmake/cross_compiling/android.cmake
+++ b/cmake/cross_compiling/android.cmake
@@ -20,6 +20,7 @@
# The supported variables are listed belows:
#
# ANDROID_STANDALONE_TOOLCHAIN
+# ANDROID_TOOLCHAIN
# ANDROID_ABI
# ANDROID_NATIVE_API_LEVEL
# ANDROID_ARM_MODE
@@ -57,6 +58,10 @@ IF(NOT DEFINED CMAKE_SYSTEM_VERSION AND ANDROID_NATIVE_API_LEVEL)
ENDIF()
ENDIF()
+IF(NOT DEFINED ANDROID_TOOLCHAIN)
+ SET(ANDROID_TOOLCHAIN clang)
+ENDIF()
+
IF(NOT DEFINED ANDROID_ABI)
SET(ANDROID_ABI "armeabi-v7a")
ENDIF()
@@ -82,6 +87,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
"${CMAKE_VERSION}), when cross-compiling for Android.")
IF(ANDROID_STANDALONE_TOOLCHAIN)
+ # Use standalone toolchain
SET(CMAKE_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot")
IF(NOT CMAKE_SYSTEM_VERSION)
@@ -96,26 +102,44 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ENDIF()
# Toolchain
- SET(ANDROID_TOOLCHAIN "gcc")
SET(ANDROID_TOOLCHAIN_ROOT ${ANDROID_STANDALONE_TOOLCHAIN})
- IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
- SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
- IF(ANDROID_ABI STREQUAL "armeabi")
- SET(CMAKE_SYSTEM_PROCESSOR armv5te)
- ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
- SET(CMAKE_SYSTEM_PROCESSOR armv7-a)
- ENDIF()
- ENDIF()
- IF(ANDROID_ABI STREQUAL "arm64-v8a")
- SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
- SET(CMAKE_SYSTEM_PROCESSOR aarch64)
+ ELSE(ANDROID_NDK)
+ # TODO: use android ndk
+ ENDIF()
+
+ IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
+ SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
+ IF(ANDROID_ABI STREQUAL "armeabi")
+ SET(CMAKE_SYSTEM_PROCESSOR armv5te)
+ SET(ANDROID_CLANG_TRIPLE armv5te-none-linux-androideabi)
+ ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
+ SET(CMAKE_SYSTEM_PROCESSOR armv7-a)
+ SET(ANDROID_CLANG_TRIPLE armv7-none-linux-androideabi)
ENDIF()
- SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
+ ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
+ SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
+ SET(CMAKE_SYSTEM_PROCESSOR aarch64)
+ SET(ANDROID_CLANG_TRIPLE aarch64-none-linux-android)
+ ELSE()
+ MESSAGE(FATAL_ERROR "Invalid Android ABI: ${ANDROID_ABI}.")
+ ENDIF()
+ SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
+
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ SET(ANDROID_C_COMPILER_NAME clang)
+ SET(ANDROID_CXX_COMPILER_NAME clang++)
+ SET(CMAKE_C_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
+ SET(CMAKE_CXX_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
+ ELSEIF(ANDROID_TOOLCHAIN STREQUAL gcc)
+ SET(ANDROID_C_COMPILER_NAME gcc)
+ SET(ANDROID_CXX_COMPILER_NAME g++)
+ ELSE()
+ MESSAGE(FATAL_ERROR "Invalid Android toolchain: ${ANDROID_TOOLCHAIN}")
ENDIF()
# C compiler
IF(NOT CMAKE_C_COMPILER)
- SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc")
+ SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_C_COMPILER_NAME}")
ELSE()
GET_FILENAME_COMPONENT(ANDROID_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM)
ENDIF()
@@ -125,7 +149,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
# CXX compiler
IF(NOT CMAKE_CXX_COMPILER)
- SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}g++")
+ SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_CXX_COMPILER_NAME}")
ELSE()
GET_FILENAME_COMPONENT(ANDROID_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM)
ENDIF()
@@ -137,7 +161,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
SET(CMAKE_CXX_COMPILER ${ANDROID_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE)
# Toolchain and ABI specific flags.
- SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections -finline-limit=64")
+ SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections")
SET(ANDROID_LINKER_FLAGS "-Wl,--gc-sections")
IF(ANDROID_ABI STREQUAL "armeabi")
@@ -145,8 +169,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
-march=armv5te
-mtune=xscale
-msoft-float)
- ENDIF()
- IF(ANDROID_ABI STREQUAL "armeabi-v7a")
+ ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
LIST(APPEND ANDROID_COMPILER_FLAGS
-march=armv7-a
-mfloat-abi=softfp)
@@ -156,6 +179,8 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
LIST(APPEND ANDROID_COMPILER_FLAGS -mfpu=vfpv3-d16)
ENDIF()
LIST(APPEND ANDROID_LINKER_FLAGS -Wl,--fix-cortex-a8)
+ ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
+ LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a)
ENDIF()
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
@@ -164,10 +189,18 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ELSE()
LIST(APPEND ANDROID_COMPILER_FLAGS -mthumb)
ENDIF()
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ # Disable integrated-as for better compatibility.
+ LIST(APPEND ANDROID_COMPILER_FLAGS -fno-integrated-as)
+ ENDIF()
ENDIF()
- IF(ANDROID_ABI STREQUAL "arm64-v8a")
- LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a)
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ # CMake automatically forwards all compiler flags to the linker,
+ # and clang doesn't like having -Wa flags being used for linking.
+ # To prevent CMake from doing this would require meddling with
+ # the CMAKE__COMPILE_OBJECT rules, which would get quite messy.
+ LIST(APPEND ANDROID_LINKER_FLAGS -Qunused-arguments)
ENDIF()
STRING(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}")
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
index 16e5bef4cdb8d6513de51838e3c3c8398dbad60d..01a2f4d5fa357ca882162247cc52299a3d1d3030 100644
--- a/cmake/external/gflags.cmake
+++ b/cmake/external/gflags.cmake
@@ -18,9 +18,9 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags)
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
IF(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+ set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+ set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
@@ -56,3 +56,12 @@ SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
ADD_DEPENDENCIES(gflags extern_gflags)
LIST(APPEND external_project_dependencies gflags)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags)
+ IF(ANDROID)
+ INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib)
+ ENDIF()
+ENDIF()
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
index 8a594a825abdca6a0f989b94fa42f97d6df5e10a..b450a3016667dcb4ab229fe7ec8aaae8609d8171 100644
--- a/cmake/external/glog.cmake
+++ b/cmake/external/glog.cmake
@@ -19,9 +19,9 @@ SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
IF(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
+ SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
ELSE(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
+ SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
@@ -56,3 +56,12 @@ ADD_DEPENDENCIES(glog extern_glog gflags)
LINK_LIBRARIES(glog gflags)
LIST(APPEND external_project_dependencies glog)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog)
+ IF(ANDROID)
+ INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib)
+ ENDIF()
+ENDIF()
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 0002a470d90f722e3f9106ca56d70e6bf2cea339..4fc8d43fc10891603b79c01a1c769cae21c52655 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+IF(USE_EIGEN_FOR_BLAS)
+ return()
+ENDIF(USE_EIGEN_FOR_BLAS)
+
INCLUDE(cblas)
IF(NOT ${CBLAS_FOUND})
@@ -69,6 +73,26 @@ IF(NOT ${CBLAS_FOUND})
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
)
+
+ IF(WITH_C_API)
+ INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas)
+    # Because libopenblas.a is a symbolic link to another library, we need to
+    # install the whole directory.
+ IF(ANDROID)
+ SET(TMP_INSTALL_DIR third_party/openblas/lib/${ANDROID_ABI})
+ ELSE()
+ SET(TMP_INSTALL_DIR third_party/openblas/lib)
+ ENDIF()
+ INSTALL(CODE "execute_process(
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib
+        ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}
+ )"
+ )
+ INSTALL(CODE "MESSAGE(STATUS \"Installing: \"
+ \"${CBLAS_INSTALL_DIR}/lib -> ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}\"
+ )"
+ )
+ ENDIF()
ENDIF(NOT ${CBLAS_FOUND})
MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index e629d61585c2d2ff916187ee28d4fd089a5bd857..a887be2e2ae5e21562fc15c775bb24cc1553480e 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -223,6 +223,15 @@ IF(NOT PROTOBUF_FOUND)
SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY}
CACHE FILEPATH "protoc library." FORCE)
+ IF(WITH_C_API)
+ INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf)
+ IF(ANDROID)
+ INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib)
+ ENDIF()
+ ENDIF()
+
IF(CMAKE_CROSSCOMPILING)
PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf)
ELSE()
diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake
index 45ca5542b7dc30216b45487782f849b93c5f8fca..5aecab90ca3cecdfdba0eac178a6ba07dfcb8745 100644
--- a/cmake/external/zlib.cmake
+++ b/cmake/external/zlib.cmake
@@ -49,3 +49,12 @@ ExternalProject_Add(
)
LIST(APPEND external_project_dependencies zlib)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib)
+ IF(ANDROID)
+ INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib)
+ ENDIF()
+ENDIF()
diff --git a/doc/design/functions_operators_layers.md b/doc/design/functions_operators_layers.md
index 7a2e8fd0ace2e3f4462b15215de22c31e944b7cb..d23ba56b5773a36d448a99e4abdebc1475ed789c 100644
--- a/doc/design/functions_operators_layers.md
+++ b/doc/design/functions_operators_layers.md
@@ -86,12 +86,13 @@ def layer.fc(X):
We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example:
-```
+
| C++ functions/functors | mul | add | | |
+|------------------------|--------------|--------------|-------------|----------|
| C++ operator class | mulOp | addOp | FCOp | |
| Python binding | operator.mul | operator.add | operator.fc | |
| Python function | | | | layer.fc |
-```
+
This is how we differentiate layer and operators in PaddlePaddle:
diff --git a/doc/design/ops/dist_train.md b/doc/design/ops/dist_train.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa3c5d7990213cf2b0d236e66e592dd2699da876
--- /dev/null
+++ b/doc/design/ops/dist_train.md
@@ -0,0 +1,106 @@
+# Design Doc: Operation Graph Based Parameter Server
+
+## Abstract
+
+We propose an approach to implement the parameter server. In this
+approach, there is no fundamental difference between the trainer and
+the parameter server: they both run subgraphs, but subgraphs of
+different purposes.
+
+## Background
+
+The previous implementations of the parameter server do not run a
+subgraph. Parameter initialization, optimizer computation, network
+communication, and checkpointing are implemented twice, on both the
+trainer and the parameter server.
+
+It would be great if we could write code once and use it on both the
+trainer and the parameter server: this reduces code duplication and
+improves extensibility. Given that after the current refactor we are
+representing everything as a computation graph on the trainer,
+representing everything as a computation graph on the parameter
+server becomes a natural extension.
+
+## Design
+
+### Graph Converter
+
+The *graph converter* converts the user-defined operation (OP) graph
+into subgraphs to be scheduled on different nodes with the following
+steps:
+
+1. OP placement: the OPs will be placed on different nodes according
+   to a heuristic that minimizes the estimated total computation
+   time. Currently we will use a simple heuristic that puts parameter
+   variables on parameter server workers and everything else on trainer
+   workers (see the placement sketch below).
+
+1. Add communication OPs to enable the communication between nodes.
+
+We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*.
+
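+As a minimal sketch of the placement step (plain Python over a toy list of
+nodes with a hypothetical `is_parameter` flag, not the actual converter API):
+
+```python
+def place_ops(ops, num_pservers):
+    """Put parameter variables on parameter server workers (round-robin);
+    everything else stays on the trainer."""
+    placement = {}
+    next_pserver = 0
+    for op in ops:
+        if op["is_parameter"]:
+            placement[op["name"]] = "pserver:%d" % (next_pserver % num_pservers)
+            next_pserver += 1
+        else:
+            placement[op["name"]] = "trainer"
+    return placement
+
+graph = [{"name": "W", "is_parameter": True},
+         {"name": "mul", "is_parameter": False},
+         {"name": "W_optimizer", "is_parameter": True}]
+print(place_ops(graph, num_pservers=2))
+# {'W': 'pserver:0', 'mul': 'trainer', 'W_optimizer': 'pserver:1'}
+```
+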
+Below is an example of converting the user-defined graph to the
+subgraphs for the trainer and the parameter server:
+
+<img src="src/local-graph.png"/>
+
+After converting:
+
+<img src="src/dist-graph.png"/>
+
+1. The parameter variable W and its optimizer subgraph are placed on the parameter server.
+1. Operators are added to the subgraphs.
+   - *Send* sends data to the connected *Recv* operator.  The
+     scheduler on the receiving node will only schedule the *Recv* operator
+     to run when the *Send* operator has run (the *Send* OP will mark
+     the *Recv* OP runnable automatically).
+   - *Enqueue* enqueues the input variable; it can block until space
+     becomes available in the queue.
+   - *Dequeue* outputs a configurable number of tensors from the
+     queue. It will block until the queue has the required number of
+     tensors (a minimal sketch of these queue semantics follows below).
+
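+The blocking semantics of *Enqueue*/*Dequeue* can be sketched with Python's
+standard `queue` module (an illustration only, not the OP implementation):
+
+```python
+import queue
+
+q = queue.Queue(maxsize=4)      # a bounded queue between two nodes
+
+def enqueue(tensor):
+    q.put(tensor)               # blocks until space becomes available
+
+def dequeue(min_count):
+    # blocks until min_count tensors are available, then returns them
+    return [q.get() for _ in range(min_count)]
+
+for grad in ["grad_0", "grad_1", "grad_2"]:
+    enqueue(grad)
+print(dequeue(min_count=2))     # ['grad_0', 'grad_1']
+```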
+
+### Benefits
+
+- Model parallelism becomes easier to implement: it is an extension of
+  the trainer - parameter server approach.  We already have the
+  communication OPs, but need to extend the graph converter's
+  placement functionality.
+
+- User-defined optimizers are easier to add - a user can now express one as
+  a subgraph.
+
+- No more duplicated logic inside the trainer and the parameter
+  server, as mentioned in the background section.
+
+### Challenges
+
+- It might be hard for the graph converter to cut a general graph
+ (without any hint for which subgraph is the optimizer). We may need
+ to label which subgraph inside the OP graph is the optimizer.
+
+- It's important to balance the parameter shards across multiple
+  parameter servers. If a single parameter is very big (e.g., some
+  word-embedding, fully connected, or softmax layers), we need to
+  automatically partition the single parameter onto different
+  parameter servers when possible (only element-wise optimizers depend
+  on the parameter variable).
+
+### Discussion
+
+- In the "Aync SGD" figure, the "W" variable on the parameter server
+ could be read and wrote concurrently, what is our locking strategy?
+ E.g., each variable have a lock cpp method to be invoked by every
+ OP, or, have a lock OP.
+
+- Can the *Enqueue* OP be implemented under our current tensor design
+  (putting the input tensor into the queue tensor)?
+
+- The *Dequeue* OP will have a variable number of outputs (depending on the
+  `min_count` attribute); does our current design support it? (A similar
+  question applies to the *Add* OP.)
+
+
+### References:
+[1] [TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
diff --git a/doc/design/ops/images/2_level_rnn.dot b/doc/design/ops/images/2_level_rnn.dot
new file mode 100644
index 0000000000000000000000000000000000000000..a498e882a3d85a33d44dbad7474fa2a340e33976
--- /dev/null
+++ b/doc/design/ops/images/2_level_rnn.dot
@@ -0,0 +1,56 @@
+digraph G {
+
+ rnn [label="1-th level RNN" shape=box]
+
+ subgraph cluster0 {
+ label = "time step 0"
+
+ sent0 [label="sentence"]
+ sent1 [label="sentence"]
+
+ rnn1 [label="2-th level RNN" shape=box]
+
+ sent0 -> rnn1
+ sent1 -> rnn1
+ }
+
+ subgraph cluster1 {
+ label = "time step 1"
+
+ sent2 [label="sentence"]
+ sent3 [label="sentence"]
+
+ rnn2 [label="2-th level RNN" shape=box]
+
+ sent2 -> rnn2
+ sent3 -> rnn2
+ }
+
+ subgraph cluster2 {
+ label = "time step 2"
+
+ sent4 [label="sentence"]
+ sent5 [label="sentence"]
+
+ rnn3 [label="2-th level RNN" shape=box]
+
+ sent4 -> rnn3
+ sent5 -> rnn3
+ }
+
+
+ para0 [label="paragraph info 0"]
+ para1 [label="paragraph info 1"]
+ para2 [label="paragraph info 2"]
+
+ rnn1 -> para0
+ rnn2 -> para1
+ rnn3 -> para2
+
+ para0 -> rnn
+ para1 -> rnn
+ para2 -> rnn
+
+ chapter [label="chapter info"]
+ rnn -> chapter
+}
diff --git a/doc/design/ops/images/2_level_rnn.png b/doc/design/ops/images/2_level_rnn.png
new file mode 100644
index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038
Binary files /dev/null and b/doc/design/ops/images/2_level_rnn.png differ
diff --git a/doc/design/ops/images/rnn.dot b/doc/design/ops/images/rnn.dot
new file mode 100644
index 0000000000000000000000000000000000000000..c1141cd9c981bb3cbf50d8bf7a6ed210280d79a5
--- /dev/null
+++ b/doc/design/ops/images/rnn.dot
@@ -0,0 +1,87 @@
+digraph G {
+ label = "simple RNN implementation"
+
+ ranksep=2;
+
+ //graph [nodesep=1, ranksep=1];
+
+ node[nodesep=1]
+
+ subgraph cluster0 {
+ label = "global scope"
+ rankdir = TB
+ W
+ boot_memory
+ input
+ output
+ }
+
+ subgraph cluster1 {
+ label = "step-scope 0"
+ rankdir = TB
+ memory0[label="memory"]
+ prememory0[label="pre-memory"]
+ step_input0[label="step input"]
+ step_output0[label="step output"]
+ }
+
+ subgraph cluster2 {
+ label = "step-scope 1"
+ rankdir = TB
+ memory1[label="memory"]
+ prememory1[label="pre-memory"]
+ step_input1[label="step input"]
+ step_output1[label="step output"]
+ }
+
+ subgraph cluster3 {
+ label = "step-scope 2"
+ rankdir = TB
+ memory2[label="memory"]
+ prememory2[label="pre-memory"]
+ step_input2[label="step input"]
+ step_output2[label="step output"]
+ }
+
+ stepnet [shape=box]
+ stepnet0 [shape=box, style=dashed]
+ stepnet1 [shape=box, style=dashed]
+ stepnet2 [shape=box, style=dashed]
+
+
+ edge[color=blue]
+ boot_memory -> prememory0 [label="init" color="blue"]
+ memory0 -> prememory1 [label="copy/reference" color="blue"]
+ memory1 -> prememory2 [label="copy/reference" color="blue"]
+
+ edge[color=black]
+ W -> stepnet0[constraint=false, style=dashed]
+ W -> stepnet1[constraint=false, style=dashed]
+ W -> stepnet2[constraint=false, style=dashed]
+
+ memory0 -> stepnet0[style=dashed]
+ prememory0 -> stepnet0 -> step_output0[style=dashed]
+
+ memory1 -> stepnet1[style=dashed]
+ prememory1 -> stepnet1 -> step_output1[style=dashed]
+
+ memory2 -> stepnet2[style=dashed]
+ prememory2 -> stepnet2 -> step_output2[style=dashed]
+
+ input -> step_input0
+ input -> step_input1
+ input -> step_input2
+
+ step_input0 -> stepnet0 [style=dashed]
+ step_input1 -> stepnet1[style=dashed]
+ step_input2 -> stepnet2[style=dashed]
+
+ step_output0 -> output
+ step_output1 -> output
+ step_output2 -> output
+
+ stepnet0 -> stepnet[style=dashed]
+ stepnet1 -> stepnet[style=dashed]
+ stepnet2 -> stepnet[style=dashed]
+
+}
diff --git a/doc/design/ops/images/rnn.jpg b/doc/design/ops/images/rnn.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840
Binary files /dev/null and b/doc/design/ops/images/rnn.jpg differ
diff --git a/doc/design/ops/images/rnn.png b/doc/design/ops/images/rnn.png
new file mode 100644
index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe
Binary files /dev/null and b/doc/design/ops/images/rnn.png differ
diff --git a/doc/design/ops/images/rnn_2level_data.dot b/doc/design/ops/images/rnn_2level_data.dot
new file mode 100644
index 0000000000000000000000000000000000000000..1d85ae2617a915ad0ad8288d848b607cc37ad297
--- /dev/null
+++ b/doc/design/ops/images/rnn_2level_data.dot
@@ -0,0 +1,75 @@
+digraph G {
+ chapter [label="chapter"]
+
+ subgraph cluster0 {
+ label = "paragraph 0"
+
+ top_rnn0[label="top rnn step 0" shape=box]
+
+ p0 [label="paragraph 0"]
+ p1 [label="paragraph 1"]
+ }
+
+ subgraph cluster1{
+ label = "paragraph 1"
+
+ top_rnn1[label="top rnn step 1" shape=box]
+
+ p2 [label="paragraph 0"]
+ p3 [label="paragraph 1"]
+ }
+
+ subgraph cluster_p0 {
+ label = "sentence 0"
+
+ low_rnn0 [label="low rnn step 0" shape=box]
+ s00 [label="sentence 0"]
+ s01 [label="sentence 1"]
+
+ low_rnn0 -> s00
+ low_rnn0 -> s01
+ }
+
+ subgraph cluster_p1 {
+ label = "sentence 1"
+ low_rnn1 [label="low rnn step 1" shape=box]
+ s10 [label="sentence 0"]
+ s11 [label="sentence 1"]
+ low_rnn1 -> s10
+ low_rnn1 -> s11
+ }
+
+ subgraph cluster_p2 {
+ label = "sentence 1"
+ low_rnn2 [label="low rnn step 0" shape=box]
+ s20 [label="sentence 0"]
+ s21 [label="sentence 1"]
+ low_rnn2 -> s20
+ low_rnn2 -> s21
+ }
+
+ subgraph cluster_p3 {
+ label = "sentence 1"
+ low_rnn3 [label="low rnn step 1" shape=box]
+ s30 [label="sentence 0"]
+ s31 [label="sentence 1"]
+ low_rnn3 -> s30
+ low_rnn3 -> s31
+ }
+
+
+ chapter -> top_rnn0
+ chapter -> top_rnn1
+
+ top_rnn0 -> p0
+ top_rnn0 -> p1
+ top_rnn1 -> p2
+ top_rnn1 -> p3
+
+
+ p0 -> low_rnn0
+ p1 -> low_rnn1
+ p2 -> low_rnn2
+ p3 -> low_rnn3
+
+}
diff --git a/doc/design/ops/images/rnn_2level_data.png b/doc/design/ops/images/rnn_2level_data.png
new file mode 100644
index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6
Binary files /dev/null and b/doc/design/ops/images/rnn_2level_data.png differ
diff --git a/doc/design/ops/rnn.md b/doc/design/ops/rnn.md
new file mode 100644
index 0000000000000000000000000000000000000000..a78eea7d45e9e9553d153170aa31da55ec6e8289
--- /dev/null
+++ b/doc/design/ops/rnn.md
@@ -0,0 +1,153 @@
+# RNNOp design
+
+This document describes an RNN operator that requires all instances in a mini-batch to have the same length. We will provide a more flexible RNN operator in the future.
+
+## RNN Algorithm Implementation
+
+<p align="center">
+<img src="./images/rnn.jpg"/>
+</p>
+
+The above diagram shows an RNN unrolled into a full network.
+
+There are several important concepts:
+
+- *step-net*: the sub-graph to run at each step,
+- *memory*, $h_t$, the state of the current step,
+- *ex-memory*, $h_{t-1}$, the state of the previous step,
+- *initial memory value*, the ex-memory of the first step.
+
+### Step-scope
+
+There could be local variables defined in step-nets. PaddlePaddle runtime realizes these variables in *step-scopes* -- scopes created for each step.
+
+<p align="center">
+<img src="./images/rnn.png"/><br/>
+Figure 2. The RNN's data flow.
+</p>
+
+Please be aware that all steps run the same step-net. Each step
+
+1. creates the step-scope,
+2. realizes local variables, including step-outputs, in the step-scope, and
+3. runs the step-net, which could use these variables.
+
+The RNN operator will compose its output from step outputs in step scopes.
+
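+The bookkeeping above can be illustrated with a toy sketch (plain Python dictionaries standing in for scopes, not the real Scope class):
+
+```python
+# the global scope holds the whole input; each step gets its own step-scope
+global_scope = {"input": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]}  # three steps
+step_scopes = []
+
+for step_input in global_scope["input"]:
+    scope = {"step_input": step_input}                   # 1. create the step-scope
+    scope["step_output"] = [2 * x for x in step_input]   # 2-3. run the step-net on local variables
+    step_scopes.append(scope)
+
+# the RNN op composes its output from the step outputs kept in the step-scopes
+global_scope["output"] = [s["step_output"] for s in step_scopes]
+print(global_scope["output"])
+```
+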
+### Memory and Ex-memory
+
+Let's give more details about memory and ex-memory via a simple example:
+
+$$
+h_t = U h_{t-1} + W x_t
+$$
+
+where $h_t$ and $h_{t-1}$ are the memory and the ex-memory of step $t$, respectively.
+
+In the implementation, we can make an ex-memory variable either "refer to" the memory variable of the previous step,
+or copy the value of the previous step's memory to the current ex-memory variable.
+
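+A tiny NumPy sketch of this recurrence (for illustration only, not the PaddlePaddle API):
+
+```python
+import numpy as np
+
+U = np.random.randn(4, 4)    # memory-to-memory weights
+W = np.random.randn(4, 8)    # input-to-memory weights
+h = np.zeros(4)              # initial memory value, i.e. the ex-memory of the first step
+
+for x_t in np.random.randn(3, 8):   # three step inputs of size 8
+    h = U.dot(h) + W.dot(x_t)       # h_t = U h_{t-1} + W x_t
+print(h)
+```
+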
+### Usage in Python
+
+For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md).
+
+We can define an RNN's step-net using Block:
+
+```python
+import paddle as pd
+
+X = some_op()  # X is some operator's output, and is a LoDTensor
+a = some_op()
+
+# declare parameters
+W = pd.Variable(shape=[20, 30])
+U = pd.Variable(shape=[20, 30])
+
+rnn = pd.create_rnn_op(output_num=1)
+with rnn.stepnet():
+    x = rnn.add_input(X)
+    # declare a memory (rnn's step)
+    h = rnn.add_memory(init=a)
+    # h.pre_state() means the previous memory of the rnn
+    new_state = pd.add_two(pd.matmul(W, x), pd.matmul(U, h.pre_state()))
+    # update the current memory
+    h.update(new_state)
+    # indicate that h variables in all step scopes should be merged
+    rnn.add_outputs(h)
+
+out = rnn()
+```
+
+Python API functions in above example:
+
+- `rnn.add_input` indicates the parameter is a variable that will be segmented into step-inputs.
+- `rnn.add_memory` creates a variable used as the memory.
+- `rnn.add_outputs` marks the variables that will be concatenated across steps into the RNN output.
+
+### Nested RNN and LoDTensor
+
+An RNN whose step-net includes other RNN operators is known as a *nested RNN*.
+
+For example, we could have a 2-level RNN, where the top level corresponds to paragraphs, and the lower level corresponds to sentences.
+
+The following figure illustrates feeding text into the lower level, one sentence per step, and feeding the step outputs to the top level. The final top-level output describes the whole text.
+
+<p align="center">
+<img src="./images/2_level_rnn.png"/>
+</p>
+
+```python
+import paddle as pd
+
+W = pd.Variable(shape=[20, 30])
+U = pd.Variable(shape=[20, 30])
+
+W0 = pd.Variable(shape=[20, 30])
+U0 = pd.Variable(shape=[20, 30])
+
+# a is the output of some op
+a = some_op()
+
+# chapter_data is a set of 128-dim word vectors
+# the first level of LoD is sentence
+# the second level of LoD is chapter
+chapter_data = pd.Variable(shape=[None, 128], type=pd.lod_tensor, level=2)
+
+def lower_level_rnn(paragraph):
+    '''
+    paragraph: the input paragraph data
+    '''
+    rnn = pd.create_rnn_op(output_num=1)
+    with rnn.stepnet():
+        sentence = rnn.add_input(paragraph, level=0)
+        h = rnn.add_memory(shape=[20, 30])
+        h.update(
+            pd.matmul(W, sentence) + pd.matmul(U, h.pre_state()))
+        # get the last state as the sentence's info
+        rnn.add_outputs(h)
+    return rnn
+
+top_level_rnn = pd.create_rnn_op(output_num=1)
+with top_level_rnn.stepnet():
+    paragraph_data = top_level_rnn.add_input(chapter_data, level=1)
+    low_rnn = lower_level_rnn(paragraph_data)
+    paragraph_out = low_rnn()
+
+    h = top_level_rnn.add_memory(init=a)
+    h.update(
+        pd.matmul(W0, paragraph_data) + pd.matmul(U0, h.pre_state()))
+    top_level_rnn.add_outputs(h)
+
+# just output the last step
+chapter_out = top_level_rnn(output_all_steps=False)
+```
+
+In the above example, the construction of the `top_level_rnn` calls `lower_level_rnn`. The input is a LoD tensor. The top-level RNN segments the input text data into paragraphs, and the lower-level RNN segments each paragraph into sentences.
+
+By default, the `RNNOp` will concatenate the outputs from all the time steps.
+If `output_all_steps` is set to False, it will only output the final time step.
+
+
+
+
+
diff --git a/doc/design/ops/src/dist-graph.graffle b/doc/design/ops/src/dist-graph.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b
Binary files /dev/null and b/doc/design/ops/src/dist-graph.graffle differ
diff --git a/doc/design/ops/src/dist-graph.png b/doc/design/ops/src/dist-graph.png
new file mode 100644
index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7
Binary files /dev/null and b/doc/design/ops/src/dist-graph.png differ
diff --git a/doc/design/ops/src/local-graph.graffle b/doc/design/ops/src/local-graph.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5
Binary files /dev/null and b/doc/design/ops/src/local-graph.graffle differ
diff --git a/doc/design/ops/src/local-graph.png b/doc/design/ops/src/local-graph.png
new file mode 100644
index 0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7
Binary files /dev/null and b/doc/design/ops/src/local-graph.png differ
diff --git a/doc/design/var_desc.md b/doc/design/var_desc.md
new file mode 100644
index 0000000000000000000000000000000000000000..86a95c10d5729704f86c285c9fe92db0cf2158be
--- /dev/null
+++ b/doc/design/var_desc.md
@@ -0,0 +1,124 @@
+## Background
+PaddlePaddle divides the description of a neural network computation graph into two stages: compile time and runtime.
+
+PaddlePaddle uses proto messages to describe the compile-time graph because:
+
+1. The computation graph should be able to be saved to a file.
+1. In distributed training, the graph will be serialized and sent to multiple workers.
+
+The computation graph is constructed from Data Nodes and Operation Nodes. The concepts used to represent them are listed in the table below.
+
+| |compile time|runtime|
+|---|---|---|
+|Data|VarDesc(proto)|Variable(cpp)|
+|Operation|OpDesc(proto)|Operator(cpp)|
+
+
+## Definition of VarDesc
+
+A VarDesc should have a name and a value; in PaddlePaddle, the value will always be a tensor. Since we use LoDTensor most of the time, we add a LoDTensorDesc to represent it.
+
+```proto
+message VarDesc {
+ required string name = 1;
+ optional LoDTensorDesc lod_tensor = 2;
+}
+```
+
+## Definition of LoDTensorDesc
+
+```proto
+enum DataType {
+ BOOL = 0;
+ INT16 = 1;
+ INT32 = 2;
+ INT64 = 3;
+ FP16 = 4;
+ FP32 = 5;
+ FP64 = 6;
+}
+
+message LoDTensorDesc {
+ required DataType data_type = 1;
+ repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
+ optional int32 lod_level = 3 [default=0];
+}
+```
+
+## Definition of Variable in Python
+
+In the Python API, a layer will take Variables as input and return Variables as output. There should be a class `Variable` in Python to help create and manage Variables.
+
+```python
+image = Variable(dims=[-1, 640, 480])
+# fc1 and fc2 are both Variable
+fc1 = layer.fc(input=image, output_size=10)
+fc2 = layer.fc(input=fc1, output_size=20)
+```
+### What should class `Variable` have
+1. `name`. A name of string type is used to mark the value of the Variable.
+1. `initializer`. Since our Tensor does not have a value initially, we will always use some Operator to fill it at runtime. So we should have an initialize method to help add the init operator.
+1. `operator`. A Variable should record which operator produces it. The reason is:
+   - we use `pd.eval(targets=[var1, var2])` to run the related ops to get the values of var1 and var2; `var.op` is used to trace the dependency of the current variable.
+
+In PaddlePaddle, we use Block to describe the computation graph, so in the code we will use Block rather than Graph.
+
+```python
+import VarDesc
+import LoDTensorDesc
+import framework
+
+def AddInitialOperator(variable, initializer):
+    # add an initialize Operator to the block to init this Variable
+    pass
+
+class Variable(object):
+    def __init__(self, name, dims, type, initializer):
+        self._block = get_default_block()
+        self._name = name
+        self.op = None
+
+        tensor_desc = LoDTensorDesc(data_type=type, dims=dims)
+        _var_desc = VarDesc(name=name, lod_tensor=tensor_desc)
+        self._var = framework.CreateVar(_var_desc)
+        self._block.add_var(self)
+
+        # add an initial op according to the initializer
+        if initializer is not None:
+            AddInitialOperator(self, initializer)
+
+    def dims(self):
+        return self._var.dims()
+
+    def data_type(self):
+        return self._var.data_type()
+
+    def to_proto(self):
+        pass
+```
+
+Then we can use this Variable to create a fc layer in Python.
+
+```python
+import paddle as pd
+
+def flatten_size(X, num_flatten_dims):
+    prod = 1  # product of the last num_flatten_dims dimensions
+    for i in xrange(num_flatten_dims):
+        prod = prod * X.dims[-i-1]
+    return prod
+
+def layer.fc(X, output_size, num_flatten_dims):
+    W = Variable(pd.random_uniform(), type=FP32, dims=[flatten_size(X, num_flatten_dims), output_size])
+    b = Variable(pd.random_uniform(), type=FP32, dims=[output_size])
+    out = Variable(type=FP32)
+    y = operator.fc(X, W, b, output=out)  # fc will put the fc op's output into out
+    pd.InferShape(y)
+    return out
+
+x = Variable(dims=[-1, 640, 480])
+y = layer.fc(x, output_size=100)
+z = layer.fc(y, output_size=200)
+
+paddle.eval(targets=[z], ...)
+print(z)
+```
diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md
index 58665e9f2b6299ec3959ed6858ab01d459f64dd8..c6570b89aedfaac1aef9b00e889b0b3ed21d8d65 100644
--- a/doc/howto/dev/new_op_cn.md
+++ b/doc/howto/dev/new_op_cn.md
@@ -34,7 +34,7 @@ Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU
注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中
-实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。
+实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。**系统会根据文件名自动构建op和其对应的Python扩展。**
下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。
@@ -224,45 +224,15 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
### 5. 编译
-- 简单**无特殊依赖**的OP无需修改CMakeList.txt文件。[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动将 `paddle/operators` 目录下新增的 `*_op.cc` 文件加入编译。
-- 较为复杂、**有额外依赖** 的operator仍需要修改[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)。如,`mul_op` 依赖 `math_function`,需要在`CMakeLists.txt`中添加如下内容:
+运行下面命令可以进行编译:
- ```
- op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) +
- ```
-
-- 运行下面命令可以进行编译:
-
- ```
- make mul_op
- ```
+```
+make mul_op
+```
## 绑定Python
-- 绑定Python
-
- 在 [`paddle/pybind/pybind.cc
-`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) 使用`USE_OP`告知编译器需要链接的Op,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。
-
- ```
- USE_OP(mul);
- ```
- 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`:
-
- ```
- USE_CPU_ONLY_OP(gather);
- ```
-
- 如果OP不带Kernel,则使用`USE_NO_KENREL_OP`:
-
- ```
- USE_NO_KENREL_OP(recurrent);
- ```
-
-
- - 生成库
-
- 无需修改 [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件,`paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。
+系统会对新增的op自动绑定Python,并链接到生成的lib库中。
## 实现单元测试
@@ -354,11 +324,7 @@ class TestMulGradOp(GradientChecker):
### 编译和执行单元测试
-单元测试编写完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)中添加以下内容,将单元测试加入工程:
-
-```
-py_test(test_mul_op SRCS test_mul_op.py)
-```
+`python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。
请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试:
@@ -371,3 +337,10 @@ make test ARGS="-R test_mul_op -V"
```bash
ctest -R test_mul_op
```
+
+## 注意事项
+
+- 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。
+- 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`等,这将会导致单元测试出错。
+- 如果Op没有实现GPU Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。
+- 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。
diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst
index 36e5d420c986fc8d88eefee4aa221dba0a0480f2..731a63f945c29ba78538b3d71289b234e569354d 100644
--- a/doc/howto/dev/write_docs_cn.rst
+++ b/doc/howto/dev/write_docs_cn.rst
@@ -5,15 +5,13 @@
PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。
-如何构建PaddlePaddle的文档
-==========================
+如何构建文档
+============
-PaddlePaddle的文档构建有直接构建和基于Docker构建两种方式,我们提供了一个构建脚本build_docs.sh来进行构建。
-PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使用基于Docker来构建PaddlePaddle的文档。
+PaddlePaddle的文档构建有两种方式。
-
-使用Docker构建PaddlePaddle的文档
---------------------------------
+使用Docker构建
+--------------
使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即
@@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使
cd TO_YOUR_PADDLE_CLONE_PATH
cd paddle/scripts/tools/build_docs
- bash build_docs.sh with_docker
-
-编译完成后,会在当前目录生成两个子目录\:
-
-* doc 英文文档目录
-* doc_cn 中文文档目录
+ sh build_docs.sh
+编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。
-
-
-直接构建PaddlePaddle的文档
---------------------------
-
-因为PaddlePaddle的v2 api文档生成过程依赖于py_paddle Python包,用户需要首先确认py_paddle包已经安装。
-
-.. code-block:: bash
-
- python -c "import py_paddle"
-
-如果提示错误,那么用户需要在本地编译安装PaddlePaddle,请参考 `源码编译文档 `_ 。
-注意,用户在首次编译安装PaddlePaddle时,请将WITH_DOC选项关闭。在编译安装正确之后,请再次确认py_paddle包已经安装,即可进行下一步操作。
+直接构建
+--------
如果提示正确,可以执行以下命令编译生成文档,即
.. code-block:: bash
cd TO_YOUR_PADDLE_CLONE_PATH
- cd paddle/scripts/tools/build_docs
- bash build_docs.sh local
-
-编译完成之后,会在当前目录生成两个子目录\:
-
-* doc 英文文档目录
-* doc_cn 中文文档目录
+ mkdir -p build
+ cd build
+ cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
+ make gen_proto_py
+ make paddle_docs paddle_docs_cn
+编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。
-如何书写PaddlePaddle的文档
-==========================
+如何书写文档
+============
PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。
-如何更新www.paddlepaddle.org文档
-================================
+如何更新文档主题
+================
+
+PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。
-开发者给PaddlePaddle代码增加的注释以PR的形式提交到github中,提交方式可参见 `贡献文档 `_ 。
+如何更新doc.paddlepaddle.org
+============================
+
+更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。
目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和
`英文文档 `_ 。
-
.. _cmake: https://cmake.org/
.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/
diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt
index dde99ab3400be4e61bfe119fc272270518acf070..3af111eb5738c3f2f399ff4e5c06c8d2ecd8973e 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/capi/CMakeLists.txt
@@ -64,9 +64,29 @@ link_paddle_exe(paddle_capi_shared)
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
+ OUTPUT_VARIABLE GIT_COMMITS_LIST
+ RESULT_VARIABLE GIT_COMMITS_LIST_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(${GIT_COMMITS_LIST_RESULT})
+ set(GIT_COMMITS_LIST "No commits.")
+ endif()
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}
DESTINATION lib/${ANDROID_ABI})
install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI})
+ install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt
+ \"Compiler:\n\"
+ \"\\t${CMAKE_C_COMPILER}\\n\"
+ \"\\t${CMAKE_CXX_COMPILER}\\n\"
+ \"Compiler Flags:\\n\"
+ \"\\t${CMAKE_F_FLAGS}\\n\"
+ \"\\t${CMAKE_CXX_FLAGS}\\n\"
+ \"Android API: ${CMAKE_SYSTEM_VERSION}\\n\"
+ \"Lastest commit:\\n\"
+ \"\\t${GIT_COMMITS_LIST}\\n\"
+ )"
+ )
else(ANDROID)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib)
install(TARGETS paddle_capi_shared DESTINATION lib)
diff --git a/paddle/cuda/include/hl_cpu_gru.cuh b/paddle/cuda/include/hl_cpu_gru.cuh
index c0a37ced2a72a1ab410025e2aa45313c23f1349a..e4f6bf42c61694e9826a127c9628730cfd43ada7 100644
--- a/paddle/cuda/include/hl_cpu_gru.cuh
+++ b/paddle/cuda/include/hl_cpu_gru.cuh
@@ -18,14 +18,6 @@ limitations under the License. */
#ifndef __NVCC__
-#include "paddle/math/MathFunctions.h"
-
-#ifndef PADDLE_TYPE_DOUBLE
-#define CBLAS_GEMM paddle::gemm
-#else
-#define CBLAS_GEMM paddle::gemm
-#endif
-
template
void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput,
real *gateValue,
@@ -210,51 +202,6 @@ inline void forward_final_output(OpFinalOutput opFinalOutput,
}
}
-template
-void hl_cpu_gru_forward(OpResetOutput opResetOutput,
- OpFinalOutput opFinalOutput,
- hl_gru_value value,
- int frameSize,
- int batchSize,
- hl_activation_mode_t active_node,
- hl_activation_mode_t active_gate) {
- if (value.prevOutValue) {
- CBLAS_GEMM(CblasNoTrans,
- CblasNoTrans,
- batchSize,
- 2 * frameSize,
- frameSize,
- 1,
- value.prevOutValue,
- frameSize,
- value.gateWeight,
- frameSize * 2,
- 1,
- value.gateValue,
- frameSize * 3);
- }
-
- forward_reset_output(opResetOutput, value, frameSize, batchSize, active_gate);
-
- if (value.prevOutValue) {
- CBLAS_GEMM(CblasNoTrans,
- CblasNoTrans,
- batchSize,
- frameSize,
- frameSize,
- 1,
- value.resetOutputValue,
- frameSize,
- value.stateWeight,
- frameSize,
- 1,
- value.gateValue + frameSize * 2,
- frameSize * 3);
- }
-
- forward_final_output(opFinalOutput, value, frameSize, batchSize, active_node);
-}
-
template
void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad,
real *gateValue,
@@ -525,86 +472,6 @@ inline void backward_reset_grad(OpResetGrad opResetGrad,
}
}
-template
-void hl_cpu_gru_backward(OpStateGrad opStateGrad,
- OpResetGrad opResetGrad,
- hl_gru_value value,
- hl_gru_grad grad,
- int frameSize,
- int batchSize,
- hl_activation_mode_t active_node,
- hl_activation_mode_t active_gate) {
- backward_state_grad(opStateGrad, value, grad,
- frameSize, batchSize, active_node);
-
- if (value.prevOutValue && grad.prevOutGrad) {
- CBLAS_GEMM(CblasNoTrans,
- CblasTrans,
- batchSize,
- frameSize,
- frameSize,
- 1,
- grad.gateGrad + frameSize * 2,
- frameSize * 3,
- value.stateWeight,
- frameSize,
- 0,
- grad.resetOutputGrad,
- frameSize);
-
- if (grad.stateWeightGrad) {
- CBLAS_GEMM(CblasTrans,
- CblasNoTrans,
- frameSize,
- frameSize,
- batchSize,
- 1,
- value.resetOutputValue,
- frameSize,
- grad.gateGrad + frameSize * 2,
- frameSize * 3,
- 1,
- grad.stateWeightGrad,
- frameSize);
- }
- }
-
- backward_reset_grad(opResetGrad, value, grad,
- frameSize, batchSize, active_gate);
-
- if (grad.prevOutGrad && value.prevOutValue) {
- CBLAS_GEMM(CblasNoTrans,
- CblasTrans,
- batchSize,
- frameSize,
- frameSize * 2,
- 1,
- grad.gateGrad,
- frameSize * 3,
- value.gateWeight,
- frameSize * 2,
- 1,
- grad.prevOutGrad,
- frameSize);
-
- if (grad.gateWeightGrad) {
- CBLAS_GEMM(CblasTrans,
- CblasNoTrans,
- frameSize,
- frameSize * 2,
- batchSize,
- 1,
- value.prevOutValue,
- frameSize,
- grad.gateGrad,
- frameSize * 3,
- 1,
- grad.gateWeightGrad,
- frameSize * 2);
- }
- }
-}
-
#endif
#endif // HL_CPU_GRU_CUH_
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index c0838d9b759110fd706577386d2c81bda6876223..3371962c635c3731f00a6af2a6e287ece33397cd 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
+nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor)
cc_test(variable_test SRCS variable_test.cc)
diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h
index cde3dfa1d3d19b1bee9fd23dad52ecbbe628c3a9..2b788a76cafe198abb9aed8ba842e37cc6ff73a6 100644
--- a/paddle/framework/attribute.h
+++ b/paddle/framework/attribute.h
@@ -45,7 +45,19 @@ class GreaterThanChecker {
public:
explicit GreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(T& value) const {
- PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail");
+ PADDLE_ENFORCE(value > lower_bound_, "larger_than check fails.");
+ }
+
+ private:
+ T lower_bound_;
+};
+
+template <typename T>
+class EqualGreaterThanChecker {
+ public:
+ explicit EqualGreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
+ void operator()(T& value) const {
+ PADDLE_ENFORCE_GE(value, lower_bound_, "equal_larger_than check fails.");
}
private:
@@ -115,6 +127,11 @@ class TypedAttrChecker {
return *this;
}
+ TypedAttrChecker& EqualGreaterThan(const T& lower_bound) {
+    value_checkers_.push_back(EqualGreaterThanChecker<T>(lower_bound));
+ return *this;
+ }
+
// we can add more common limits, like LessThan(), Between()...
TypedAttrChecker& SetDefault(const T& default_value) {
diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md
index 8aa6728a95bc464ab8884986f0cec6c817d3303b..0a6d762bc8be5201ac196b4bc6107c06d07a31d7 100644
--- a/paddle/framework/backward.md
+++ b/paddle/framework/backward.md
@@ -2,20 +2,31 @@
## Motivation
-In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the fundmental gradient operators/expressions together with chain rule . Every forward network need a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass.
-
-## Backward Operator Registry
+In neural networks, many models are currently solved by the backpropagation algorithm (known as BP). Technically, it calculates the gradient of the loss function and then propagates that gradient back through the network. Following the chain rule, we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass will be generated with respect to the forward pass.
-A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs and output gradients and then calculate its input gradients.
+## Implementation
+
+In this design doc, we export only one API for generating the backward pass:
+
+```c++
+std::unique_ptr<OperatorBase> Backward(const OperatorBase& forwardOp,
+                                       const std::unordered_set<std::string>& no_grad_vars);
+```
+
+The implementation behind it can be divided into two parts: **Backward Operator Creating** and **Backward Network Building**.
+
+### Backward Operator Registry
+
+A backward network is built up with several backward operators. Backward operators take the forward operators' inputs, outputs, and output gradients, and then calculate the corresponding input gradients.
| | forward operator | backward operator
| ---------------------- | ---------------- |------------------------- |
| **Operator::inputs_** | Inputs | Inputs, Outputs, OutputGradients |
| **Operator::outputs_** | Outputs | InputGradients |
- In most cases, there is a one-to-one correspondence between forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
+ In most cases, there is a one-to-one correspondence between the forward and backward operators. These correspondences are recorded by a global hash map (`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
-For example, we have got a `mul_op`, and we can register it's information and corresponding backward operator by the following macro:
+For example, we have got a `mul_op`, and we can register its information and corresponding backward operator by the following macro:
```cpp
REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
@@ -25,58 +36,65 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
`mul_grad` is the type of backward operator, and `MulOpGrad` is its class name.
-## Backward Opeartor Creating
+### Backward Operator Creating
-Given a certain forward operator, we can get its corresponding backward opeartor by calling:
+Given a certain forward operator, we can get its corresponding backward operator by calling:
```cpp
OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op);
-```
+```
The function `BuildGradOp` will sequentially execute following processes:
1. Get the `type_` of given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`.
-2. Build two maps named `inputs` and `outputs` to temporary storage backward operator's inputs and outputs. Copy forward operator's `inputs_` and `outputs_` to map `inputs`, except these are not necessary for gradient computing.
+2. Build two maps named `inputs` and `outputs` to temporarily store the backward operator's inputs and outputs. Copy the forward operator's `inputs_` and `outputs_` to map `inputs`, except those that are not necessary for gradient computing.
3. Add forward inputs' gradient variables into map `output`, adding forward outputs' gradient variables into map `input`.
4. Building backward operator with `inputs`, `outputs` and forward operator's attributes.
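+
+Steps 1-4 can be sketched in a few lines of plain Python (dictionaries with hypothetical field names, not the real C++ `BuildGradOp`):
+
+```python
+GRAD_SUFFIX = "@GRAD"
+
+def build_grad_op(fwd_op, op_info_map):
+    grad_type = op_info_map[fwd_op["type"]]              # step 1: look up the backward type
+    inputs = dict(fwd_op["inputs"])                      # step 2: copy forward inputs ...
+    inputs.update(fwd_op["outputs"])                     #         ... and forward outputs
+    outputs = {}
+    for name, var in fwd_op["outputs"].items():          # step 3: forward outputs' gradients
+        inputs[name + GRAD_SUFFIX] = var + GRAD_SUFFIX   #         become backward inputs
+    for name, var in fwd_op["inputs"].items():           #         forward inputs' gradients
+        outputs[name + GRAD_SUFFIX] = var + GRAD_SUFFIX  #         become backward outputs
+    return {"type": grad_type, "inputs": inputs,         # step 4: assemble the backward op
+            "outputs": outputs, "attrs": fwd_op.get("attrs", {})}
+
+fwd = {"type": "mul", "inputs": {"X": "x", "Y": "w"}, "outputs": {"Out": "y"}}
+print(build_grad_op(fwd, {"mul": "mul_grad"}))
+```
+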
-## Backward Network Building
+### Backward Network Building
-A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and put them together.
-
-In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network.
-
-given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`,`InputGradients`.
+A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and appending them together one by one. There are some corner cases that need to be processed specially.
1. Op
- when the input forward network is a Op, return its gradient Operator Immediately.
+   When the input forward network is an Op, return its gradient Operator immediately. If all of its outputs are in the no-gradient set, then return a special `NOP`.
2. NetOp
- when the input forward network is a NetOp, it need to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to forward NetOp.
+   In our design, the network itself is also a kind of operator (**NetOp**), so the operators contained in a big network may themselves be small networks. When the input forward network is a NetOp, it needs to call the sub-NetOp/Operators' backward functions recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp.
+
+3. RnnOp
+
+   RnnOp is a nested stepnet operator. The backward module needs to recursively call `Backward` for every stepnet.
+
+4. Sharing Variables
+
+   **Sharing variables**. As illustrated in the pictures, two operators share the same variable name W@GRAD, which will overwrite their shared input variable.
+
+
+
- **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwirte their shared input variable.
+   Pic 1. Sharing variables in operators.
-
- 
+
- 1. shared variable in two operators.
+   Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to replace the overwrite links.
-
+
+
- Share variable between operators or same input variable used in multiple operators lead to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively, and add a generic add operator replace the overwirte links.
+   Pic 2. Replace the shared variable's gradient with an `Add` operator.
-
- 
+
- 2. replace shared variable gradient with `Add` Operator
+   Because our framework finds variables according to their names, we need to rename the output links. We add a numeric suffix to represent its position in clockwise order.
-
+5. Part of the Gradient is Zero.
+   In the whole graph, there are cases where an operator's gradient is not needed, but its input's gradient is a dependency link of another operator; we need to fill a gradient matrix of the same shape in that position. In our implementation, we insert a special `fillZeroLike` operator.
- Then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it.
+Following the rules above, we then collect the subgraph's `OutputGradients`/`InputGradients` as the NetOp's and return it.
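+
+The variable-sharing rule (pic 2) can be sketched in plain Python; the `@RENAME@` suffix and dictionary layout are illustrative only, not the real implementation:
+
+```python
+def dedup_gradient(backward_ops, grad_name):
+    """Rename duplicated gradient outputs and append an add op that sums them."""
+    renamed, count = [], 0
+    for op in backward_ops:
+        outputs = []
+        for out in op["outputs"]:
+            if out == grad_name:
+                out = "%s@RENAME@%d" % (grad_name, count)
+                count += 1
+            outputs.append(out)
+        renamed.append({"type": op["type"], "outputs": outputs})
+    if count > 1:
+        renamed.append({"type": "add",
+                        "inputs": ["%s@RENAME@%d" % (grad_name, i) for i in range(count)],
+                        "outputs": [grad_name]})
+    return renamed
+
+ops = [{"type": "fc_grad", "outputs": ["W@GRAD"]},
+       {"type": "mul_grad", "outputs": ["W@GRAD"]}]
+print(dedup_gradient(ops, "W@GRAD"))
+```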
diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc
index 85b7de79743bb0390d66b8999f2e8342a51d14a9..fc3d508553c0e966978b28d58127bdbff10d45f1 100644
--- a/paddle/framework/ddim.cc
+++ b/paddle/framework/ddim.cc
@@ -283,5 +283,14 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
DDim::DDim(std::initializer_list init_list) {
*this = make_ddim(init_list);
}
+
+DDim flatten_to_2d(const DDim& src, int num_col_dims) {
+ int rank = src.size();
+ return make_ddim({product(slice_ddim(src, 0, num_col_dims)),
+ product(slice_ddim(src, num_col_dims, rank))});
+}
+
+DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
+
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h
index db30c523948b1d437615aa0e9bfecb5e25569296..ca29e7e8c7776de6adf3e3b0e8f11f0d4d8487c3 100644
--- a/paddle/framework/ddim.h
+++ b/paddle/framework/ddim.h
@@ -115,6 +115,12 @@ int arity(const DDim& ddim);
std::ostream& operator<<(std::ostream&, const DDim&);
+// Reshape a tensor to a matrix. The matrix's first dimension (column length)
+// will be the product of the tensor's first `num_col_dims` dimensions.
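+// For example (illustrative): src dims [2, 3, 4, 9] with num_col_dims = 2
+// flatten to the matrix dims [2 * 3, 4 * 9] = [6, 36].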
+DDim flatten_to_2d(const DDim& src, int num_col_dims);
+
+DDim flatten_to_1d(const DDim& src);
+
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/eigen.h b/paddle/framework/eigen.h
index 2d8d9ae10c56e0632414a5bbc754d35bfa9ce6a5..54bbeafcabdeeb1e2c1017c156b3512c83dada3a 100644
--- a/paddle/framework/eigen.h
+++ b/paddle/framework/eigen.h
@@ -63,20 +63,35 @@ struct EigenTensor {
template
-struct EigenMatrix : public EigenTensor {};
+struct EigenMatrix : public EigenTensor {
+ static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) {
+ int rank = tensor.dims_.size();
+ PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
+ "`num_col_dims` must be between (0, rank_of_tensor).");
+ return EigenMatrix::From(tensor,
+ flatten_to_2d(tensor.dims(), num_col_dims));
+ }
+
+ static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
+ int num_col_dims) {
+ int rank = tensor.dims_.size();
+ PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
+ "`num_col_dims` must be between (0, rank_of_tensor).");
+ return EigenMatrix::From(tensor,
+ flatten_to_2d(tensor.dims(), num_col_dims));
+ }
+};
template
struct EigenVector : public EigenTensor {
// Flatten reshapes a Tensor into an EigenVector.
static typename EigenVector::Type Flatten(Tensor& tensor) {
- return EigenVector::From(
- tensor, make_ddim({static_cast(product(tensor.dims_))}));
+ return EigenVector::From(tensor, {product(tensor.dims_)});
}
static typename EigenVector::ConstType Flatten(const Tensor& tensor) {
- return EigenVector::From(
- tensor, make_ddim({static_cast(product(tensor.dims_))}));
+ return EigenVector::From(tensor, {product(tensor.dims_)});
}
};
diff --git a/paddle/framework/eigen_test.cc b/paddle/framework/eigen_test.cc
index dc1957691b1a202826e10e84c21ac8874df9e378..bc4a2db32cfba66bef2c444e1f822e0d2a57b91e 100644
--- a/paddle/framework/eigen_test.cc
+++ b/paddle/framework/eigen_test.cc
@@ -108,5 +108,24 @@ TEST(Eigen, Matrix) {
}
}
+TEST(Eigen, MatrixReshape) {
+ Tensor t;
+ float* p = t.mutable_data({2, 3, 6, 4}, platform::CPUPlace());
+ for (int i = 0; i < 2 * 3 * 6 * 4; ++i) {
+ p[i] = static_cast(i);
+ }
+
+ EigenMatrix::Type em = EigenMatrix::Reshape(t, 2);
+
+ ASSERT_EQ(2 * 3, em.dimension(0));
+ ASSERT_EQ(6 * 4, em.dimension(1));
+
+ for (int i = 0; i < 2 * 3; i++) {
+ for (int j = 0; j < 6 * 4; j++) {
+ ASSERT_NEAR(i * 6 * 4 + j, em(i, j), 1e-6f);
+ }
+ }
+}
+
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle
index 2b658085d6a55d368c320051ba7f94ec2900f13c..5cec3bc64dbd44dc99e348485969f29bd128ceb1 100644
Binary files a/paddle/framework/images/duplicate_op2.graffle and b/paddle/framework/images/duplicate_op2.graffle differ
diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png
index c5588015d1450fd8c1bda3580680d884494868bb..21cdd5cabf1b5203e1435a75b57770d2f702fa92 100644
Binary files a/paddle/framework/images/duplicate_op2.png and b/paddle/framework/images/duplicate_op2.png differ
diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h
index 154068fef69bc96edbd85b731fe8091b3b1ff823..fac5cd20aa7f9db0792f8102bb442192ab1ad63f 100644
--- a/paddle/framework/lod_tensor.h
+++ b/paddle/framework/lod_tensor.h
@@ -18,8 +18,10 @@
#ifndef PADDLE_ONLY_CPU
#include
#include
+#include
#endif
+#include
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/enforce.h"
@@ -32,7 +34,8 @@ template
using Vector = std::vector;
#else
template
-using Vector = thrust::host_vector;
+using Vector = thrust::host_vector<
+ T, thrust::system::cuda::experimental::pinned_allocator>;
#endif
using LoD = std::vector>;
@@ -48,18 +51,15 @@ bool operator==(const LoD& a, const LoD& b);
* LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
-class LoDTensor {
+class LoDTensor : public Tensor {
public:
LoDTensor() {}
- LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
- void set_lod(const LoD& lod) { lod_ = lod; }
-
- void set_tensor(Tensor* tensor) { tensor_ = tensor; }
+ explicit LoDTensor(const LoD& lod) : lod_(lod) {}
- Tensor& tensor() { return *tensor_; }
+ void set_lod(const LoD& lod) { lod_ = lod; }
- LoD lod() { return lod_; }
+ LoD lod() const { return lod_; }
/*
* Get a element from LoD.
@@ -101,7 +101,6 @@ class LoDTensor {
private:
LoD lod_;
- Tensor* tensor_; // not owned
};
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc
index 1da8553134f377f7a4fbe8008d12fe8d4a0e47f4..7915326b27a22e9280e3f09d9bbfc2a58f46aff7 100644
--- a/paddle/framework/lod_tensor_test.cc
+++ b/paddle/framework/lod_tensor_test.cc
@@ -36,69 +36,64 @@ class LoDTensorTester : public ::testing::Test {
ASSERT_EQ(lod.size(), 3UL);
- tensor.Resize({20 /*batch size*/, 128 /*dim*/});
+ lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/});
// malloc memory
- tensor.mutable_data(place);
+ lod_tensor_.mutable_data(place);
- lod_tensor.set_lod(lod);
- lod_tensor.set_tensor(&tensor);
+ lod_tensor_.set_lod(lod);
}
protected:
platform::CPUPlace place;
- Tensor tensor;
- LoDTensor lod_tensor;
+ LoDTensor lod_tensor_;
};
-TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
+TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); }
TEST_F(LoDTensorTester, NumElements) {
- ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
- ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
- ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
+ ASSERT_EQ(lod_tensor_.NumElements(0), 2UL);
+ ASSERT_EQ(lod_tensor_.NumElements(1), 4UL);
+ ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
}
TEST_F(LoDTensorTester, SliceLevels) {
// slice 1 level
for (size_t level = 0; level < 3UL; ++level) {
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
- ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
- ASSERT_EQ(new_lod_tensor.tensor().data(),
- lod_tensor.tensor().data());
+ ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
+ ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data());
}
// slice 2 level
for (size_t level = 0; level < 2UL; ++level) {
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
- ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
- ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor.NumElements(level + 1));
- ASSERT_EQ(new_lod_tensor.tensor().data(),
- lod_tensor.tensor().data());
+ ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
+ ASSERT_EQ(new_lod_tensor.NumElements(1),
+ lod_tensor_.NumElements(level + 1));
+ ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data());
}
}
TEST_F(LoDTensorTester, SliceInLevel) {
size_t level = 0;
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
- ASSERT_EQ(new_lod_tensor.tensor().data(),
- lod_tensor.tensor().data());
+ ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data());
level = 1;
- new_lod_tensor = lod_tensor;
+ new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
- ASSERT_EQ(new_lod_tensor.tensor().data(),
- lod_tensor.tensor().data());
+ ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data());
}
} // namespace framework
diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu
new file mode 100644
index 0000000000000000000000000000000000000000..97e69cdb2e5e1e64031c899f5e04020665485ba8
--- /dev/null
+++ b/paddle/framework/lod_tensor_test.cu
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include
+#include
+#include "paddle/framework/lod_tensor.h"
+#include "paddle/platform/assert.h"
+
+#include
+
+__global__ void test(size_t* a, int size) {
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
+ i += blockDim.x * gridDim.x) {
+ a[i] *= 2;
+ }
+}
+
+TEST(LoDTensor, LoDInGPU) {
+ paddle::framework::LoDTensor lod_tensor;
+ paddle::platform::GPUPlace place(0);
+
+ paddle::framework::LoD src_lod;
+ src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14});
+
+ lod_tensor.Resize({14, 16});
+ lod_tensor.mutable_data(place);
+
+ lod_tensor.set_lod(src_lod);
+ CHECK_EQ(lod_tensor.lod_element(0, 2), 4);
+ CHECK_EQ(lod_tensor.lod_element(0, 4), 8);
+
+ auto lod = lod_tensor.lod();
+
+ test<<<1, 8>>>(lod[0].data(), lod[0].size());
+ cudaDeviceSynchronize();
+
+ for (size_t i = 0; i < src_lod[0].size(); ++i) {
+ CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
+ }
+}
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index 790cfc4746b1d34da413fa3c29a266f962c6dde6..c57537be4bf67a8db6a49669ab8d2ed1b1324bdc 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -123,6 +123,15 @@ OperatorBase::OperatorBase(const std::string& type,
CheckAllInputOutputSet();
}
+std::vector OperatorBase::InputVars() const {
+ std::vector ret_val;
+ for (auto& o : inputs_) {
+ ret_val.reserve(ret_val.size() + o.second.size());
+ ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
+ }
+ return ret_val;
+}
+
std::vector OperatorBase::OutputVars(bool has_intermediate) const {
std::vector ret_val;
if (has_intermediate) {
@@ -177,6 +186,48 @@ void OperatorBase::GenerateTemporaryNames() {
}
}
+template <>
+const Tensor* InferShapeContext::Input(const std::string& name) const {
+ auto* var = InputVar(name);
+ return var == nullptr ? nullptr : GetTensorFromVar(var);
+}
+
+template <>
+const std::vector InferShapeContext::MultiInput(
+ const std::string& name) const {
+ auto names = op().Inputs(name);
+ std::vector res;
+ res.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) {
+ auto var = scope_.FindVar(sub_name);
+ return var == nullptr ? nullptr : GetTensorFromVar(var);
+ });
+ return res;
+}
+
+template <>
+Tensor* ExecutionContext::Output(const std::string& name) const {
+ auto* var = OutputVar(name);
+ return var == nullptr ? nullptr : const_cast(GetTensorFromVar(var));
+}
+
+template <>
+std::vector ExecutionContext::MultiOutput(
+ const std::string& name) const {
+ auto names = op().Outputs(name);
+ std::vector res;
+ res.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) {
+ auto var = scope().FindVar(sub_name);
+ return var == nullptr
+ ? nullptr
+ : const_cast(GetTensorFromVar(var));
+ });
+ return res;
+}
+
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 9a98d4d3be0d1cb875d614b263f1e4365ede4113..adae7bfc3d7d31b1ed0373f01db4ef80343a08f7 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -22,6 +22,7 @@ limitations under the License. */
#include "op_info.h"
#include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
+#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
@@ -94,11 +95,14 @@ class OperatorBase {
const VariableNameMap& Inputs() const { return inputs_; }
const VariableNameMap& Outputs() const { return outputs_; }
+
//! Get a input with argument's name described in `op_proto`
std::string Input(const std::string& name) const;
//! Get a input which has multiple variables.
const std::vector& Inputs(const std::string& name) const;
+ std::vector InputVars() const;
+
//! Get a output with argument's name described in `op_proto`
std::string Output(const std::string& name) const;
//! Get an output which has multiple variables.
@@ -311,9 +315,9 @@ class InferShapeContext {
}
template
- std::vector MultiOutput(const std::string& name) const {
+ std::vector MultiOutput(const std::string& name) const {
auto names = op_.Outputs(name);
- std::vector res;
+ std::vector res;
res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) {
@@ -323,11 +327,27 @@ class InferShapeContext {
return res;
}
+ const Tensor* GetTensorFromVar(const Variable* var) const {
+ if (var->IsType()) {
+ return &var->Get();
+ }
+ PADDLE_ENFORCE(var->IsType(),
+ "The Input(%s) must be LoDTensor or Tensor.");
+ return &var->Get();
+ }
+
private:
const OperatorBase& op_;
const Scope& scope_;
};
+template <>
+const Tensor* InferShapeContext::Input(const std::string& name) const;
+
+template <>
+const std::vector InferShapeContext::MultiInput(
+ const std::string& name) const;
+
template
struct EigenDeviceConverter;
@@ -360,9 +380,37 @@ class ExecutionContext : public InferShapeContext {
return device_context_;
}
+ // redefine Output function,
+ // use Variable::Get instead of Variable::GetMutable
+ template
+ T* Output(const std::string& name) const {
+ auto var = OutputVar(name);
+ return var == nullptr ? nullptr : const_cast(&var->Get());
+ }
+
+ // redefine MultiOutput function.
+ // use Variable::Get instead of Variable::GetMutable
+ template
+ std::vector MultiOutput(const std::string& name) const {
+ auto names = op().Outputs(name);
+ std::vector res;
+ res.reserve(names.size());
+ std::transform(
+ names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) { return Output(sub_name); });
+ return res;
+ }
+
const platform::DeviceContext* device_context_;
};
+template <>
+Tensor* ExecutionContext::Output(const std::string& name) const;
+
+template <>
+std::vector ExecutionContext::MultiOutput(
+ const std::string& name) const;
+
class OpKernel {
public:
/**
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 643f875491724bf443bd7727391734377ee6180c..4b5a2ae523f2f7fde5445f0534cd99969ad9d59e 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -43,6 +43,9 @@ class Tensor {
template
friend struct EigenTensor;
+ template
+ friend struct EigenMatrix;
+
template
friend struct EigenVector;
@@ -78,6 +81,9 @@ class Tensor {
/*! Return the dimensions of the memory block. */
inline const DDim& dims() const;
+ /*! Return the numel of the memory block. */
+ inline int64_t numel() const;
+
/*! Resize the dimensions of the memory block. */
inline Tensor& Resize(const DDim& dims);
@@ -159,6 +165,12 @@ class Tensor {
/*! points to dimensions of memory block. */
DDim dims_;
+ /**
+ * A cache of the number of elements in a tensor.
+ * Would be 0 for an uninitialized tensor.
+ */
+ int64_t numel_;
+
/**
* @brief A PlaceHolder may be shared by more than one tensor.
*
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 94f436294f350e2a39785a09959efb3b17bd00a5..642b53efc7095d25712ca324638f5fe9b8316c0c 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const {
PADDLE_ENFORCE_NOT_NULL(
holder_, "Tenosr holds no memory. Call Tensor::mutable_data first.");
PADDLE_ENFORCE_GE(
- holder_->size(), product(dims_) * sizeof(T) + offset_,
+ holder_->size(), numel() * sizeof(T) + offset_,
"Tensor's dims_ is out of bound. Call Tensor::mutable_data "
"first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data already stored.");
@@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
template
inline T* Tensor::mutable_data(platform::Place place) {
static_assert(std::is_pod::value, "T must be POD");
- PADDLE_ENFORCE_GT(product(dims_), 0,
+ PADDLE_ENFORCE_GT(numel(), 0,
"Tensor's numel must be larger than zero to call "
"Tensor::mutable_data. Call Tensor::set_dim first.");
/* some versions of boost::variant don't have operator!= */
- int64_t size = product(dims_) * sizeof(T);
+ int64_t size = numel() * sizeof(T);
if (holder_ == nullptr || !(holder_->place() == place) ||
holder_->size() < size + offset_) {
if (platform::is_cpu_place(place)) {
@@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src,
auto dst_ptr = static_cast(mutable_data(dst_place));
- auto size = product(src.dims_) * sizeof(T);
+ auto size = src.numel() * sizeof(T);
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
memory::Copy(boost::get(dst_place), dst_ptr,
@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
- size_t base = product(dims_) / dims_[0];
+ size_t base = numel() / dims_[0];
Tensor dst;
dst.holder_ = holder_;
DDim dst_dims = dims_;
@@ -143,10 +143,21 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
inline Tensor& Tensor::Resize(const DDim& dims) {
dims_ = dims;
+ numel_ = product(dims_);
return *this;
}
inline const DDim& Tensor::dims() const { return dims_; }
+inline int64_t Tensor::numel() const { return numel_; }
+
+template
+inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
+ Tensor res;
+ res.ShareDataWith(src);
+ res.Resize(flatten_to_2d(src.dims(), num_col_dims));
+ return res;
+}
+
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index 7db38d5caeebccf710334e854faf785ef0f64063..55302ea47120f420e952b26830c8ea4cbcce6435 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) {
}
#endif
}
+
+TEST(Tensor, ReshapeToMatrix) {
+ using namespace paddle::framework;
+ using namespace paddle::platform;
+ Tensor src;
+ int* src_ptr = src.mutable_data({2, 3, 4, 9}, CPUPlace());
+ for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
+ src_ptr[i] = i;
+ }
+ Tensor res = ReshapeToMatrix(src, 2);
+ ASSERT_EQ(res.dims()[0], 2 * 3);
+ ASSERT_EQ(res.dims()[1], 4 * 9);
+}
\ No newline at end of file
diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt
index f43f15e5cacb70b625d7791e1e02ce7780286200..4fd72d64a90ae6f16dd1499ceb7fba6e40fe4cea 100644
--- a/paddle/function/CMakeLists.txt
+++ b/paddle/function/CMakeLists.txt
@@ -44,6 +44,7 @@ if(WITH_GPU)
add_simple_unittest(RowConvOpTest)
add_simple_unittest(BlockExpandOpTest)
add_simple_unittest(CropOpTest)
+ add_simple_unittest(SwitchOpTest)
endif()
add_simple_unittest(Im2ColTest)
diff --git a/paddle/function/EigenGemm.cpp b/paddle/function/EigenGemm.cpp
index 674141ed39b7f5573948348e3ba3bb526ae43c66..b3e666e860d29d89650d48a23cf44917035a02d7 100644
--- a/paddle/function/EigenGemm.cpp
+++ b/paddle/function/EigenGemm.cpp
@@ -83,9 +83,9 @@ struct EigenBlasGemm {
};
#ifdef PADDLE_TYPE_DOUBLE
-template class EigenBlasGemm;
+template struct EigenBlasGemm;
#else
-template class EigenBlasGemm;
+template struct EigenBlasGemm;
#endif
} // namespace paddle
diff --git a/paddle/function/GruFunctor.h b/paddle/function/GruFunctor.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f6392198ea360502f313cbe15dfae46ece69758
--- /dev/null
+++ b/paddle/function/GruFunctor.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "GemmFunctor.h"
+#include "hl_cpu_gru.cuh"
+
+namespace paddle {
+
+template
+struct GruFunctor {
+ template
+ static void compute(OpResetOutput opResetOutput,
+ OpFinalOutput opFinalOutput,
+ hl_gru_value value,
+ int frameSize,
+ int batchSize,
+ hl_activation_mode_t active_node,
+ hl_activation_mode_t active_gate) {
+#ifndef __NVCC__
+ if (value.prevOutValue) {
+ BlasGemm::compute(false,
+ false,
+ batchSize,
+ 2 * frameSize,
+ frameSize,
+ 1,
+ value.prevOutValue,
+ frameSize,
+ value.gateWeight,
+ frameSize * 2,
+ 1,
+ value.gateValue,
+ frameSize * 3);
+ }
+
+ forward_reset_output(
+ opResetOutput, value, frameSize, batchSize, active_gate);
+
+ if (value.prevOutValue) {
+ BlasGemm::compute(false,
+ false,
+ batchSize,
+ frameSize,
+ frameSize,
+ 1,
+ value.resetOutputValue,
+ frameSize,
+ value.stateWeight,
+ frameSize,
+ 1,
+ value.gateValue + frameSize * 2,
+ frameSize * 3);
+ }
+
+ forward_final_output(
+ opFinalOutput, value, frameSize, batchSize, active_node);
+#endif
+ }
+};
+
+template
+struct GruGradFunctor {
+ template
+ static void compute(OpStateGrad opStateGrad,
+ OpResetGrad opResetGrad,
+ hl_gru_value value,
+ hl_gru_grad grad,
+ int frameSize,
+ int batchSize,
+ hl_activation_mode_t active_node,
+ hl_activation_mode_t active_gate) {
+#ifndef __NVCC__
+ backward_state_grad(
+ opStateGrad, value, grad, frameSize, batchSize, active_node);
+
+ if (value.prevOutValue && grad.prevOutGrad) {
+ BlasGemm::compute(false,
+ true,
+ batchSize,
+ frameSize,
+ frameSize,
+ 1,
+ grad.gateGrad + frameSize * 2,
+ frameSize * 3,
+ value.stateWeight,
+ frameSize,
+ 0,
+ grad.resetOutputGrad,
+ frameSize);
+
+ if (grad.stateWeightGrad) {
+ BlasGemm::compute(true,
+ false,
+ frameSize,
+ frameSize,
+ batchSize,
+ 1,
+ value.resetOutputValue,
+ frameSize,
+ grad.gateGrad + frameSize * 2,
+ frameSize * 3,
+ 1,
+ grad.stateWeightGrad,
+ frameSize);
+ }
+ }
+
+ backward_reset_grad(
+ opResetGrad, value, grad, frameSize, batchSize, active_gate);
+
+ if (grad.prevOutGrad && value.prevOutValue) {
+ BlasGemm::compute(false,
+ true,
+ batchSize,
+ frameSize,
+ frameSize * 2,
+ 1,
+ grad.gateGrad,
+ frameSize * 3,
+ value.gateWeight,
+ frameSize * 2,
+ 1,
+ grad.prevOutGrad,
+ frameSize);
+
+ if (grad.gateWeightGrad) {
+ BlasGemm::compute(true,
+ false,
+ frameSize,
+ frameSize * 2,
+ batchSize,
+ 1,
+ value.prevOutValue,
+ frameSize,
+ grad.gateGrad,
+ frameSize * 3,
+ 1,
+ grad.gateWeightGrad,
+ frameSize * 2);
+ }
+ }
+#endif
+ }
+};
+
+} // namespace paddle
diff --git a/paddle/function/Im2Col.h b/paddle/function/Im2Col.h
index 9b91e223a6a28586b11fe7ed4a44421e029a67bb..1e0cff436ff60d5a029e89657d00af2b0bf8b454 100644
--- a/paddle/function/Im2Col.h
+++ b/paddle/function/Im2Col.h
@@ -94,95 +94,4 @@ public:
int paddingWidth);
};
-template
-struct Padding {
- static void run(const T* src,
- T* dest,
- int channels,
- int inputHeight,
- int inputWidth,
- int paddingHeight,
- int paddingWidth) {
- const int destWidth = inputWidth + 2 * paddingWidth;
- for (int c = 0; c < channels; c++) {
- if (paddingHeight > 0) {
- memset(dest, 0, destWidth * paddingHeight * sizeof(T));
- dest += destWidth * paddingHeight;
- }
-
- for (int i = 0; i < inputHeight; i++) {
- // padding head
- for (int j = 0; j < paddingWidth; j++) {
- *dest++ = T(0);
- }
-
- memcpy(dest, src, inputWidth * sizeof(T));
- dest += inputWidth;
- src += inputWidth;
-
- // padding tail
- for (int j = 0; j < paddingWidth; j++) {
- *dest++ = T(0);
- }
- }
-
- if (paddingHeight > 0) {
- memset(dest, 0, destWidth * paddingHeight * sizeof(T));
- dest += destWidth * paddingHeight;
- }
- }
- }
-};
-
-#if defined(__ARM_NEON__) || defined(__ARM_NEON)
-template <>
-struct Padding {
- static void run(const float* src,
- float* dest,
- int channels,
- int inputHeight,
- int inputWidth,
- int paddingHeight,
- int paddingWidth) {
- const int destWidth = inputWidth + 2 * paddingWidth;
- for (int c = 0; c < channels; c++) {
- if (paddingHeight > 0) {
- memset(dest, 0, destWidth * paddingHeight * sizeof(float));
- dest += destWidth * paddingHeight;
- }
-
- for (int i = 0; i < inputHeight; i++) {
- // padding head
- for (int j = 0; j < paddingWidth; j++) {
- *dest++ = float(0);
- }
-
- int step = inputWidth >> 2;
- int remain = inputWidth & 3;
- for (int s = 0; s < step; s++) {
- float32x4_t s0 = vld1q_f32(src);
- vst1q_f32(dest, s0);
- src += 4;
- dest += 4;
- }
- for (int r = 0; r < remain; r++) {
- *dest++ = *src++;
- }
-
- // padding tail
- for (int j = 0; j < paddingWidth; j++) {
- *dest++ = float(0);
- }
- }
-
- if (paddingHeight > 0) {
- memset(dest, 0, destWidth * paddingHeight * sizeof(float));
- dest += destWidth * paddingHeight;
- }
- }
- }
-};
-
-#endif
-
} // namespace paddle
diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp
index 91b4b8ed91b6055babcfbab8f7adb2c55e2747d0..25e41edad54bec0f76a3de4799fab14241407272 100644
--- a/paddle/function/MulOp.cpp
+++ b/paddle/function/MulOp.cpp
@@ -13,18 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "MulOp.h"
-/// todo(tianbing), delete it
-#include
-#include "paddle/math/MathFunctions.h"
+#include "GemmFunctor.h"
#include "paddle/math/SIMDFunctions.h"
#include "paddle/utils/ThreadLocal.h"
-#ifndef PADDLE_TYPE_DOUBLE
-#define GEMM paddle::gemm
-#else
-#define GEMM paddle::gemm
-#endif
-
namespace {
inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) {
for (unsigned int i = 0; i < len; ++i) {
@@ -114,19 +106,20 @@ void MulOp(CpuMatrix& out,
real scaleT,
bool aTrans,
bool bTrans) {
- GEMM(aTrans ? CblasTrans : CblasNoTrans,
- bTrans ? CblasTrans : CblasNoTrans,
- out.getHeight(),
- out.getWidth(),
- !aTrans ? a.getWidth() : a.getHeight(),
- scaleAB,
- a.getData(),
- a.getStride(),
- b.getData(),
- b.getStride(),
- scaleT,
- out.getData(),
- out.getStride());
+ BlasGemm::compute(
+ aTrans,
+ bTrans,
+ out.getHeight(),
+ out.getWidth(),
+ !aTrans ? a.getWidth() : a.getHeight(),
+ scaleAB,
+ a.getData(),
+ a.getStride(),
+ b.getData(),
+ b.getStride(),
+ scaleT,
+ out.getData(),
+ out.getStride());
}
/// dense matrix (+)= sparse matrix * dense matrix
diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..01e252a8dc0cd5fa1e964efa01d04cf282b3dfe7
--- /dev/null
+++ b/paddle/function/SwitchOp.cpp
@@ -0,0 +1,140 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "SwitchOp.h"
+#include "paddle/math/Vector.h"
+
+namespace paddle {
+
+template <>
+void NCHW2NHWC(real* outputs,
+ const real* inputs,
+ const int num,
+ const int inC,
+ const int inH,
+ const int inW,
+ const int argType) {
+ for (int n = 0; n < num; ++n) {
+ for (int c = 0; c < inC; ++c) {
+ for (int h = 0; h < inH; ++h) {
+ for (int w = 0; w < inW; ++w) {
+ if (argType == ADD_TO) {
+ outputs[((n * inH + h) * inW + w) * inC + c] += *(inputs++);
+ } else {
+ outputs[((n * inH + h) * inW + w) * inC + c] = *(inputs++);
+ }
+ }
+ }
+ }
+ }
+}
+
+template <>
+void NHWC2NCHW(real* outputs,
+ const real* inputs,
+ const int num,
+ const int inH,
+ const int inW,
+ const int inC,
+ const int argType) {
+ for (int n = 0; n < num; ++n) {
+ for (int h = 0; h < inH; ++h) {
+ for (int w = 0; w < inW; ++w) {
+ for (int c = 0; c < inC; ++c) {
+ if (argType == ADD_TO) {
+ outputs[((n * inC + c) * inH + h) * inW + w] += *(inputs++);
+ } else {
+ outputs[((n * inC + c) * inH + h) * inW + w] = *(inputs++);
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * \brief Switch dimension order of image input.
+ * The input and output are 4D tensors. Switch the order
+ * 'batch_size, channels, height, width' to
+ * the order 'batch_size, height, width, channels'.
+ *
+ * Arguments in this Function:
+ * \param inputs input data with order 'batch_size, channels, height, width'.
+ * \param outputs output data with order 'batch_size, height, width, channels'.
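+ *
+ * For example (illustrative): with num = 1, inC = 2, inH = 2, inW = 3, the
+ * element read at NCHW index (n, c, h, w) is written to the NHWC offset
+ * ((n * inH + h) * inW + w) * inC + c, e.g. input offset 6 (c = 1, h = 0,
+ * w = 0) goes to output offset 1.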
+ */
+template
+class NCHW2NHWCFunc : public FunctionBase {
+public:
+ void init(const FuncConfig& config) override {}
+
+ void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ CHECK_EQ(1UL, inputs.size());
+ CHECK_EQ(1UL, outputs.size());
+
+ size_t num = inputs[0].shape()[0];
+ size_t inC = inputs[0].shape()[1];
+ size_t inH = inputs[0].shape()[2];
+ size_t inW = inputs[0].shape()[3];
+ NCHW2NHWC(outputs[0].data(),
+ inputs[0].data(),
+ num,
+ inC,
+ inH,
+ inW,
+ outputs[0].getArgType());
+ }
+};
+
+/**
+ * \brief Switch dimension order of image input.
+ * The input and output are 4D tensors. Switch the order
+ * 'batch_size, height, width, channels' to
+ * the order 'batch_size, channels, height, width'.
+ *
+ * Arguments in this Function:
+ * \param inputs input data with order 'batch_size, height, width, channels'.
+ * \param outputs output data with order 'batch_size, channels, height, width'.
+ */
+template
+class NHWC2NCHWFunc : public FunctionBase {
+public:
+ void init(const FuncConfig& config) override {}
+
+ void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ CHECK_EQ(1UL, inputs.size());
+ CHECK_EQ(1UL, outputs.size());
+
+ size_t num = inputs[0].shape()[0];
+ size_t inH = inputs[0].shape()[1];
+ size_t inW = inputs[0].shape()[2];
+ size_t inC = inputs[0].shape()[3];
+
+ NHWC2NCHW(outputs[0].data(),
+ inputs[0].data(),
+ num,
+ inH,
+ inW,
+ inC,
+ outputs[0].getArgType());
+ }
+};
+
+REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc);
+REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc);
+#ifndef PADDLE_ONLY_CPU
+REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc);
+REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc);
+#endif
+
+} // namespace paddle
diff --git a/paddle/function/SwitchOp.h b/paddle/function/SwitchOp.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4c1c3ac922f88c3e5424b5943082810aabfacdb
--- /dev/null
+++ b/paddle/function/SwitchOp.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Function.h"
+
+namespace paddle {
+
+/**
+ * \brief This function switches the dimension order of the image input.
+ *        The input and output are 4D tensors. Switch the order 'batch_size,
+ *        channels, height, width' to the order 'batch_size, height, width,
+ *        channels'.
+ *
+ * \param[out] outputs save results.
+ * \param[in] inputs input data.
+ * \param[in] num batch size of input data.
+ * \param[in] inC channel number of input data.
+ * \param[in] inH height of input data.
+ * \param[in] inW width of input data.
+ * \param[in] argType type of output argument.
+ */
+template
+void NCHW2NHWC(real* outputs,
+ const real* inputs,
+ const int num,
+ const int inC,
+ const int inH,
+ const int inW,
+ const int argtype);
+
+/**
+ * \brief This function switches the dimension order of the image input.
+ *        The input and output are 4D tensors. Switch the order 'batch_size,
+ *        height, width, channels' to the order 'batch_size, channels, height,
+ *        width'.
+ *
+ * \param[out] inGrad gradients of previous layer.
+ * \param[in] outGrad output gradients.
+ * \param[in] num batch size of input data.
+ * \param[in] inH height of input data.
+ * \param[in] inW width of input data.
+ * \param[in] inC channel number of input data.
+ * \param[in] argType type of output argument.
+ */
+template
+void NHWC2NCHW(real* inGrad,
+ const real* outGrad,
+ const int num,
+ const int inH,
+ const int inW,
+ const int inC,
+ const int argType);
+} // namespace paddle
diff --git a/paddle/function/SwitchOpGpu.cu b/paddle/function/SwitchOpGpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..45390a56c3f776ec18a65a6ba2f7149a7a6ef6c3
--- /dev/null
+++ b/paddle/function/SwitchOpGpu.cu
@@ -0,0 +1,98 @@
+/* Copyright (c) 2016 Paddle
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "SwitchOp.h"
+#include "hl_base.h"
+
+namespace paddle {
+
+__global__ void KeNCHW2NHWC(real* outputs,
+ const real* inputs,
+ int inC,
+ int inH,
+ int inW,
+ int nthreads,
+ int argType) {
+ const int idx = threadIdx.x + blockIdx.x * blockDim.x;
+ if (idx < nthreads) {
+ const int w = idx % inW;
+ const int h = (idx / inW) % inH;
+ const int c = (idx / inW / inH) % inC;
+ const int n = idx / inW / inH / inC;
+
+ const int off = ((n * inH + h) * inW + w) * inC + c;
+ if (argType == ADD_TO) {
+ outputs[off] += inputs[idx];
+ } else {
+ outputs[off] = inputs[idx];
+ }
+ }
+}
+
+template <>
+void NCHW2NHWC(real* outputs,
+ const real* inputs,
+ const int num,
+ const int inC,
+ const int inH,
+ const int inW,
+ const int argType) {
+ size_t nth = num * inC * inH * inW;
+ int blockSize = 1024;
+ int gridSize = (nth + 1024 - 1) / 1024;
+ KeNCHW2NHWC<<>>(
+ outputs, inputs, inC, inH, inW, nth, argType);
+ CHECK_SYNC("NCHW2NHWC");
+}
+
+__global__ void KeNHWC2NCHW(real* outputs,
+ const real* inputs,
+ int inH,
+ int inW,
+ int inC,
+ int nthreads,
+ int argType) {
+ const int idx = threadIdx.x + blockIdx.x * blockDim.x;
+ if (idx < nthreads) {
+ const int c = idx % inC;
+ const int w = (idx / inC) % inW;
+ const int h = (idx / inC / inW) % inH;
+ const int n = idx / inW / inH / inC;
+
+ const int off = ((n * inC + c) * inH + h) * inW + w;
+ if (argType == ADD_TO) {
+ outputs[off] += inputs[idx];
+ } else {
+ outputs[off] = inputs[idx];
+ }
+ }
+}
+
+template <>
+void NHWC2NCHW(real* outputs,
+ const real* inputs,
+ const int num,
+ const int inH,
+ const int inW,
+ const int inC,
+ const int argType) {
+ int nth = num * inC * inH * inW;
+ int blockSize = 1024;
+ int gridSize = (nth + 1024 - 1) / 1024;
+ KeNHWC2NCHW<<>>(
+ outputs, inputs, inH, inW, inC, nth, argType);
+ CHECK_SYNC("NHWC2NCHW");
+}
+
+} // namespace paddle
diff --git a/paddle/function/SwitchOpTest.cpp b/paddle/function/SwitchOpTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..03b0dd66ddcbab713969ed747601ecb1b2eb7955
--- /dev/null
+++ b/paddle/function/SwitchOpTest.cpp
@@ -0,0 +1,44 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include
+#include "FunctionTest.h"
+
+namespace paddle {
+
+TEST(Pad, real) {
+ for (size_t numSamples : {1, 4, 8, 16}) {
+ for (size_t channels : {1, 4, 8, 16}) {
+ for (size_t imgSizeH : {1, 4, 8, 16}) {
+ for (size_t imgSizeW : {1, 4, 8, 16}) {
+ VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
+ << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
+ for (bool test_grad : {true, false}) {
+ CpuGpuFuncCompare compare(test_grad ? "NHWC2NCHW" : "NCHW2NHWC",
+ FuncConfig());
+ TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW};
+ TensorShape outDims{numSamples, imgSizeH, imgSizeW, channels};
+ compare.addInputs(
+ BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims));
+ compare.addOutputs(BufferArg(
+ VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO));
+ compare.run();
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace paddle
diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/function/neon/NeonDepthwiseConv.cpp
index f09e98587d1681d29a79a9cb0303c2d4356c6935..18126152ea0b4ebfe4ec5c8084479787814ed173 100644
--- a/paddle/function/neon/NeonDepthwiseConv.cpp
+++ b/paddle/function/neon/NeonDepthwiseConv.cpp
@@ -12,468 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
-#include "neon_util.h"
+#include "NeonDepthwiseConv.h"
#include "paddle/function/ConvOp.h"
-#include "paddle/function/Im2Col.h"
namespace paddle {
-namespace neon {
-
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
-template
-struct DepthwiseConvKernel {};
-
-inline float32_t conv3x3(float32x4_t r0,
- float32x4_t r1,
- float32x4_t r2,
- float32x4_t k0,
- float32x4_t k1,
- float32x4_t k2) {
- float32x4_t tmp;
- tmp = vmulq_f32(r0, k0);
- tmp = vmlaq_f32(tmp, r1, k1);
- tmp = vmlaq_f32(tmp, r2, k2);
- return vaddvq_f32(tmp);
-}
-
-inline float32_t conv4x4(float32x4_t r0,
- float32x4_t r1,
- float32x4_t r2,
- float32x4_t r3,
- float32x4_t k0,
- float32x4_t k1,
- float32x4_t k2,
- float32x4_t k3) {
- float32x4_t tmp;
- tmp = vmulq_f32(r0, k0);
- tmp = vmlaq_f32(tmp, r1, k1);
- tmp = vmlaq_f32(tmp, r2, k2);
- tmp = vmlaq_f32(tmp, r3, k3);
- return vaddvq_f32(tmp);
-}
-
-/**
- * Each step calculates four elements of the output.
- * First step:
- * R0[0, 1, 2, 3...] * K[0][0]
- * R0[1, 2, 3, 4...] * K[0][1]
- * R0[2, 3, 4, 5...] * K[0][2]
- * R1[0, 1, 2, 3...] * K[1][0]
- * R1[1, 2, 3, 4...] * K[1][1]
- * R1[2, 3, 4, 5...] * K[1][2]
- * R2[0, 1, 2, 3...] * K[2][0]
- * R2[1, 2, 3, 4...] * K[2][1]
- * + R2[2, 3, 4, 5...] * K[2][2]
- * ------------------------------
- * Output[0, 1, 2, 3]
- */
-template <>
-struct DepthwiseConvKernel<3, 1> {
- static void run(const float* inputData,
- const float* filterData,
- int inputHeight,
- int inputWidth,
- int outputChannels,
- int outputHeight,
- int outputWidth,
- int filterMultiplier,
- float* outputData) {
- const int steps = outputWidth >> 2;
- const int remain = outputWidth & 3;
- for (int c = 0; c < outputChannels; c++, filterData += 9) {
- // Load the filters
- float32x4_t k[3];
- k[0] = vld1q_f32(filterData);
- k[1] = vld1q_f32(filterData + 3);
- k[2] = vld1q_f32(filterData + 6);
- k[0] = vsetq_lane_f32(0.f, k[0], 3);
- k[1] = vsetq_lane_f32(0.f, k[1], 3);
- k[2] = vsetq_lane_f32(0.f, k[2], 3);
-
- const float* r0 =
- inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
- const float* r1 = r0 + inputWidth;
- const float* r2 = r0 + inputWidth * 2;
- float32x4_t input[3][3];
- for (int h = 0; h < outputHeight; h++) {
- for (int s = 0; s < steps; s++) {
- // Load the inputs
- float32x4_t tmp;
- input[0][0] = vld1q_f32(r0);
- tmp = vld1q_f32(r0 + 4);
- input[0][1] = vextq_f32(input[0][0], tmp, 1);
- input[0][2] = vextq_f32(input[0][0], tmp, 2);
- input[1][0] = vld1q_f32(r1);
- tmp = vld1q_f32(r1 + 4);
- input[1][1] = vextq_f32(input[1][0], tmp, 1);
- input[1][2] = vextq_f32(input[1][0], tmp, 2);
- input[2][0] = vld1q_f32(r2);
- tmp = vld1q_f32(r2 + 4);
- input[2][1] = vextq_f32(input[2][0], tmp, 1);
- input[2][2] = vextq_f32(input[2][0], tmp, 2);
-
- float32x4_t tmp1 = vdupq_n_f32(0.f);
- float32x4_t tmp2 = vdupq_n_f32(0.f);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
- tmp1 = vaddq_f32(tmp1, tmp2);
-
- vst1q_f32(outputData, tmp1);
- r0 += 4;
- r1 += 4;
- r2 += 4;
- outputData += 4;
- }
-
- for (int r = 0; r < remain; r++) {
- float32x4_t i0 = vld1q_f32(r0);
- float32x4_t i1 = vld1q_f32(r1);
- float32x4_t i2 = vld1q_f32(r2);
- *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
- r0++;
- r1++;
- r2++;
- outputData++;
- }
-
- r0 += 2;
- r1 += 2;
- r2 += 2;
- }
- }
- }
-};
-
-/**
- * Each step calculates four elements of the output.
- * First step:
- * R0[0, 2, 4, 6...] * K[0][0]
- * R0[1, 3, 5, 7...] * K[0][1]
- * R0[2, 4, 6, 8...] * K[0][2]
- * R1[0, 2, 4, 6...] * K[1][0]
- * R1[1, 3, 5, 7...] * K[1][1]
- * R1[2, 4, 6, 8...] * K[1][2]
- * R2[0, 2, 4, 6...] * K[2][0]
- * R2[1, 3, 5, 7...] * K[2][1]
- * R2[2, 4, 6, 8...] * K[2][2]
- * ------------------------------
- * Output[0, 1, 2, 3]
- */
-template <>
-struct DepthwiseConvKernel<3, 2> {
- static void run(const float* inputData,
- const float* filterData,
- int inputHeight,
- int inputWidth,
- int outputChannels,
- int outputHeight,
- int outputWidth,
- int filterMultiplier,
- float* outputData) {
- const int steps = outputWidth >> 2;
- const int remain = outputWidth & 3;
- for (int c = 0; c < outputChannels; c++, filterData += 9) {
- // Load the filters
- float32x4_t k[3];
- k[0] = vld1q_f32(filterData);
- k[1] = vld1q_f32(filterData + 3);
- k[2] = vld1q_f32(filterData + 6);
- k[0] = vsetq_lane_f32(0.f, k[0], 3);
- k[1] = vsetq_lane_f32(0.f, k[1], 3);
- k[2] = vsetq_lane_f32(0.f, k[2], 3);
-
- const float* start =
- inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
- float32x4_t input[3][3];
- for (int h = 0; h < outputHeight; h++) {
- const float* r0 = start + 2 * h * inputWidth;
- const float* r1 = start + (2 * h + 1) * inputWidth;
- const float* r2 = start + (2 * h + 2) * inputWidth;
- for (int s = 0; s < steps; s++) {
- // Load the inputs
- float32x4_t data1;
- float32x4x2_t data2;
-
- data2 = vld2q_f32(r0);
- input[0][0] = data2.val[0];
- input[0][1] = data2.val[1];
- data1 = vld1q_f32(r0 + 8);
- input[0][2] = vextq_f32(data2.val[0], data1, 1);
-
- data2 = vld2q_f32(r1);
- input[1][0] = data2.val[0];
- input[1][1] = data2.val[1];
- data1 = vld1q_f32(r1 + 8);
- input[1][2] = vextq_f32(data2.val[0], data1, 1);
-
- data2 = vld2q_f32(r2);
- input[2][0] = data2.val[0];
- input[2][1] = data2.val[1];
- data1 = vld1q_f32(r2 + 8);
- input[2][2] = vextq_f32(data2.val[0], data1, 1);
-
- float32x4_t tmp1 = vdupq_n_f32(0.f);
- float32x4_t tmp2 = vdupq_n_f32(0.f);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
- tmp1 = vaddq_f32(tmp1, tmp2);
-
- vst1q_f32(outputData, tmp1);
- r0 += 8;
- r1 += 8;
- r2 += 8;
- outputData += 4;
- }
-
- for (int r = 0; r < remain; r++) {
- float32x4_t i0 = vld1q_f32(r0);
- float32x4_t i1 = vld1q_f32(r1);
- float32x4_t i2 = vld1q_f32(r2);
- *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
- r0 += 2;
- r1 += 2;
- r2 += 2;
- outputData++;
- }
- }
- }
- }
-};
-
-/**
- * Each step calculates four elements of the output.
- */
-template <>
-struct DepthwiseConvKernel<4, 1> {
- static void run(const float* inputData,
- const float* filterData,
- int inputHeight,
- int inputWidth,
- int outputChannels,
- int outputHeight,
- int outputWidth,
- int filterMultiplier,
- float* outputData) {
- const int steps = outputWidth >> 2;
- const int remain = outputWidth & 3;
- for (int c = 0; c < outputChannels; c++, filterData += 16) {
- // Load the filters
- float32x4_t k[4];
- k[0] = vld1q_f32(filterData);
- k[1] = vld1q_f32(filterData + 4);
- k[2] = vld1q_f32(filterData + 8);
- k[3] = vld1q_f32(filterData + 12);
-
- const float* r0 =
- inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
- const float* r1 = r0 + inputWidth;
- const float* r2 = r0 + inputWidth * 2;
- const float* r3 = r0 + inputWidth * 3;
- float32x4_t input[4][4];
- for (int h = 0; h < outputHeight; h++) {
- for (int s = 0; s < steps; s++) {
- // Load the inputs
- float32x4_t tmp;
- input[0][0] = vld1q_f32(r0);
- tmp = vld1q_f32(r0 + 4);
- input[0][1] = vextq_f32(input[0][0], tmp, 1);
- input[0][2] = vextq_f32(input[0][0], tmp, 2);
- input[0][3] = vextq_f32(input[0][0], tmp, 3);
-
- input[1][0] = vld1q_f32(r1);
- tmp = vld1q_f32(r1 + 4);
- input[1][1] = vextq_f32(input[1][0], tmp, 1);
- input[1][2] = vextq_f32(input[1][0], tmp, 2);
- input[1][3] = vextq_f32(input[1][0], tmp, 3);
-
- input[2][0] = vld1q_f32(r2);
- tmp = vld1q_f32(r2 + 4);
- input[2][1] = vextq_f32(input[2][0], tmp, 1);
- input[2][2] = vextq_f32(input[2][0], tmp, 2);
- input[2][3] = vextq_f32(input[2][0], tmp, 3);
-
- input[3][0] = vld1q_f32(r3);
- tmp = vld1q_f32(r3 + 4);
- input[3][1] = vextq_f32(input[3][0], tmp, 1);
- input[3][2] = vextq_f32(input[3][0], tmp, 2);
- input[3][3] = vextq_f32(input[3][0], tmp, 3);
-
- float32x4_t tmp1 = vdupq_n_f32(0.f);
- float32x4_t tmp2 = vdupq_n_f32(0.f);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
- tmp1 = vaddq_f32(tmp1, tmp2);
-
- vst1q_f32(outputData, tmp1);
- r0 += 4;
- r1 += 4;
- r2 += 4;
- r3 += 4;
- outputData += 4;
- }
-
- for (int r = 0; r < remain; r++) {
- float32x4_t i0 = vld1q_f32(r0);
- float32x4_t i1 = vld1q_f32(r1);
- float32x4_t i2 = vld1q_f32(r2);
- float32x4_t i3 = vld1q_f32(r3);
- *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
- r0++;
- r1++;
- r2++;
- r3++;
- outputData++;
- }
-
- r0 += 3;
- r1 += 3;
- r2 += 3;
- r3 += 3;
- }
- }
- }
-};
-
-/**
- * Each step calculates four elements of the output.
- */
-template <>
-struct DepthwiseConvKernel<4, 2> {
- static void run(const float* inputData,
- const float* filterData,
- int inputHeight,
- int inputWidth,
- int outputChannels,
- int outputHeight,
- int outputWidth,
- int filterMultiplier,
- float* outputData) {
- const int steps = outputWidth >> 2;
- const int remain = outputWidth & 3;
- for (int c = 0; c < outputChannels; c++, filterData += 16) {
- // Load the filters
- float32x4_t k[4];
- k[0] = vld1q_f32(filterData);
- k[1] = vld1q_f32(filterData + 4);
- k[2] = vld1q_f32(filterData + 8);
- k[3] = vld1q_f32(filterData + 12);
-
- const float* start =
- inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
- float32x4_t input[4][4];
- for (int h = 0; h < outputHeight; h++) {
- const float* r0 = start + 2 * h * inputWidth;
- const float* r1 = start + (2 * h + 1) * inputWidth;
- const float* r2 = start + (2 * h + 2) * inputWidth;
- const float* r3 = start + (2 * h + 3) * inputWidth;
- for (int s = 0; s < steps; s++) {
- // Load the inputs
- float32x4x2_t data1;
- float32x4x2_t data2;
-
- data1 = vld2q_f32(r0);
- data2 = vld2q_f32(r0 + 8);
- input[0][0] = data1.val[0];
- input[0][1] = data1.val[1];
- input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1);
- input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1);
-
- data1 = vld2q_f32(r1);
- data2 = vld2q_f32(r1 + 8);
- input[1][0] = data1.val[0];
- input[1][1] = data1.val[1];
- input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1);
- input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1);
-
- data1 = vld2q_f32(r2);
- data2 = vld2q_f32(r2 + 8);
- input[2][0] = data1.val[0];
- input[2][1] = data1.val[1];
- input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1);
- input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1);
-
- data1 = vld2q_f32(r3);
- data2 = vld2q_f32(r3 + 8);
- input[3][0] = data1.val[0];
- input[3][1] = data1.val[1];
- input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1);
- input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1);
-
- float32x4_t tmp1 = vdupq_n_f32(0.f);
- float32x4_t tmp2 = vdupq_n_f32(0.f);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
- tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
- tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
- tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
- tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
- tmp1 = vaddq_f32(tmp1, tmp2);
-
- vst1q_f32(outputData, tmp1);
- r0 += 8;
- r1 += 8;
- r2 += 8;
- r3 += 8;
- outputData += 4;
- }
-
- for (int r = 0; r < remain; r++) {
- float32x4_t i0 = vld1q_f32(r0);
- float32x4_t i1 = vld1q_f32(r1);
- float32x4_t i2 = vld1q_f32(r2);
- float32x4_t i3 = vld1q_f32(r3);
- *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
- r0 += 2;
- r1 += 2;
- r2 += 2;
- r3 += 2;
- outputData++;
- }
- }
- }
- }
-};
-
template
class NeonDepthwiseConvFunction : public ConvFunctionBase {
public:
@@ -497,16 +42,16 @@ public:
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
- size_t batchSize = input[0];
- size_t inputChannels = input[1];
- size_t inputHeight = input[2];
- size_t inputWidth = input[3];
- size_t filterHeight = getFilterHeight(filter);
- size_t filterWidth = getFilterWidth(filter);
- size_t outputChannels = output[1];
- size_t outputHeight = output[2];
- size_t outputWidth = output[3];
- size_t filterMultiplier = outputChannels / groups_;
+ int batchSize = input[0];
+ int inputChannels = input[1];
+ int inputHeight = input[2];
+ int inputWidth = input[3];
+ int filterHeight = getFilterHeight(filter);
+ int filterWidth = getFilterWidth(filter);
+ int outputChannels = output[1];
+ int outputHeight = output[2];
+ int outputWidth = output[3];
+ int filterMultiplier = outputChannels / groups_;
CHECK_EQ(inputChannels, groups_);
// only support strideH() == strideW() and filterHeight == filterWidth.
@@ -519,22 +64,19 @@ public:
// padding the input
float* inputPadding = inputData;
+ int padInputHeight = inputHeight + 2 * paddingH();
+ int padInputWidth = inputWidth + 2 * paddingW();
if (paddingH() > 0 || paddingW() > 0) {
- int newSize = batchSize * inputChannels * (inputHeight + 2 * paddingH()) *
- (inputWidth + 2 * paddingW());
+ int newSize = batchSize * inputChannels * padInputHeight * padInputWidth;
resizeBuffer(newSize);
inputPadding = reinterpret_cast(memory_->getBuf());
- Padding::run(inputData,
- inputPadding,
- batchSize * inputChannels,
- inputHeight,
- inputWidth,
- paddingH(),
- paddingW());
-
- // height and width of padding data
- inputHeight += 2 * paddingH();
- inputWidth += 2 * paddingW();
+ neon::Padding::run(inputData,
+ inputPadding,
+ batchSize * inputChannels,
+ inputHeight,
+ inputWidth,
+ padInputHeight,
+ padInputWidth);
}
std::function::run;
+ DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run;
} else if (filterWidth == 3 && strideW() == 2) {
- DepthWiseConv = DepthwiseConvKernel<3, 2>::run;
+ DepthWiseConv = neon::DepthwiseConvKernel<3, 2>::run;
} else if (filterWidth == 4 && strideW() == 1) {
- DepthWiseConv = DepthwiseConvKernel<4, 1>::run;
+ DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run;
} else if (filterWidth == 4 && strideW() == 2) {
- DepthWiseConv = DepthwiseConvKernel<4, 2>::run;
+ DepthWiseConv = neon::DepthwiseConvKernel<4, 2>::run;
} else {
LOG(FATAL) << "Not supported";
}
- for (size_t i = 0; i < batchSize; i++) {
+ for (int i = 0; i < batchSize; i++) {
DepthWiseConv(inputPadding,
filterData,
- inputHeight,
- inputWidth,
+ padInputHeight,
+ padInputWidth,
outputChannels,
outputHeight,
outputWidth,
filterMultiplier,
outputData);
- inputPadding += inputChannels * inputHeight * inputWidth;
+ inputPadding += inputChannels * padInputHeight * padInputWidth;
outputData += outputChannels * outputHeight * outputWidth;
}
}
};
+#ifndef PADDLE_TYPE_DOUBLE
REGISTER_TYPED_FUNC(NeonDepthwiseConv, CPU, NeonDepthwiseConvFunction);
+#endif
#endif
-} // namespace neon
} // namespace paddle
diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h
new file mode 100644
index 0000000000000000000000000000000000000000..33722d3cac61b62f5dce8f51105c1bf4e70c4a6c
--- /dev/null
+++ b/paddle/function/neon/NeonDepthwiseConv.h
@@ -0,0 +1,631 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include
+#include "neon_util.h"
+
+namespace paddle {
+
+namespace neon {
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+
+template
+struct DepthwiseConvKernel {};
+
+inline float32_t conv3x3(float32x4_t r0,
+ float32x4_t r1,
+ float32x4_t r2,
+ float32x4_t k0,
+ float32x4_t k1,
+ float32x4_t k2) {
+ float32x4_t tmp;
+ tmp = vmulq_f32(r0, k0);
+ tmp = vmlaq_f32(tmp, r1, k1);
+ tmp = vmlaq_f32(tmp, r2, k2);
+ return vaddvq_f32(tmp);
+}
+
+inline float32_t conv4x4(float32x4_t r0,
+ float32x4_t r1,
+ float32x4_t r2,
+ float32x4_t r3,
+ float32x4_t k0,
+ float32x4_t k1,
+ float32x4_t k2,
+ float32x4_t k3) {
+ float32x4_t tmp;
+ tmp = vmulq_f32(r0, k0);
+ tmp = vmlaq_f32(tmp, r1, k1);
+ tmp = vmlaq_f32(tmp, r2, k2);
+ tmp = vmlaq_f32(tmp, r3, k3);
+ return vaddvq_f32(tmp);
+}
+
+/**
+ * Each step calculates four elements of the output.
+ * First step:
+ * R0[0, 1, 2, 3...] * K[0][0]
+ * R0[1, 2, 3, 4...] * K[0][1]
+ * R0[2, 3, 4, 5...] * K[0][2]
+ * R1[0, 1, 2, 3...] * K[1][0]
+ * R1[1, 2, 3, 4...] * K[1][1]
+ * R1[2, 3, 4, 5...] * K[1][2]
+ * R2[0, 1, 2, 3...] * K[2][0]
+ * R2[1, 2, 3, 4...] * K[2][1]
+ * + R2[2, 3, 4, 5...] * K[2][2]
+ * ------------------------------
+ * Output[0, 1, 2, 3]
+ */
+template <>
+struct DepthwiseConvKernel<3, 1> {
+ static void run(const float* inputData,
+ const float* filterData,
+ int inputHeight,
+ int inputWidth,
+ int outputChannels,
+ int outputHeight,
+ int outputWidth,
+ int filterMultiplier,
+ float* outputData) {
+ const int steps = outputWidth >> 2;
+ const int remain = outputWidth & 3;
+ for (int c = 0; c < outputChannels; c++, filterData += 9) {
+ // Load the filters
+ float32x4_t k[3];
+ k[0] = vld1q_f32(filterData);
+ k[1] = vld1q_f32(filterData + 3);
+ k[2] = vld1q_f32(filterData + 6);
+ k[0] = vsetq_lane_f32(0.f, k[0], 3);
+ k[1] = vsetq_lane_f32(0.f, k[1], 3);
+ k[2] = vsetq_lane_f32(0.f, k[2], 3);
+
+ const float* r0 =
+ inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
+ const float* r1 = r0 + inputWidth;
+ const float* r2 = r0 + inputWidth * 2;
+ float32x4_t input[3][3];
+ for (int h = 0; h < outputHeight; h++) {
+ for (int s = 0; s < steps; s++) {
+ // Load the inputs
+ float32x4_t tmp;
+ input[0][0] = vld1q_f32(r0);
+ tmp = vld1q_f32(r0 + 4);
+ input[0][1] = vextq_f32(input[0][0], tmp, 1);
+ input[0][2] = vextq_f32(input[0][0], tmp, 2);
+ input[1][0] = vld1q_f32(r1);
+ tmp = vld1q_f32(r1 + 4);
+ input[1][1] = vextq_f32(input[1][0], tmp, 1);
+ input[1][2] = vextq_f32(input[1][0], tmp, 2);
+ input[2][0] = vld1q_f32(r2);
+ tmp = vld1q_f32(r2 + 4);
+ input[2][1] = vextq_f32(input[2][0], tmp, 1);
+ input[2][2] = vextq_f32(input[2][0], tmp, 2);
+
+ float32x4_t tmp1 = vdupq_n_f32(0.f);
+ float32x4_t tmp2 = vdupq_n_f32(0.f);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
+ tmp1 = vaddq_f32(tmp1, tmp2);
+
+ vst1q_f32(outputData, tmp1);
+ r0 += 4;
+ r1 += 4;
+ r2 += 4;
+ outputData += 4;
+ }
+
+ for (int r = 0; r < remain; r++) {
+ float32x4_t i0 = vld1q_f32(r0);
+ float32x4_t i1 = vld1q_f32(r1);
+ float32x4_t i2 = vld1q_f32(r2);
+ *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
+ r0++;
+ r1++;
+ r2++;
+ outputData++;
+ }
+
+ r0 += 2;
+ r1 += 2;
+ r2 += 2;
+ }
+ }
+ }
+};
+
+/**
+ * Each step calculates four elements of the output.
+ * First step:
+ * R0[0, 2, 4, 6...] * K[0][0]
+ * R0[1, 3, 5, 7...] * K[0][1]
+ * R0[2, 4, 6, 8...] * K[0][2]
+ * R1[0, 2, 4, 6...] * K[1][0]
+ * R1[1, 3, 5, 7...] * K[1][1]
+ * R1[2, 4, 6, 8...] * K[1][2]
+ * R2[0, 2, 4, 6...] * K[2][0]
+ * R2[1, 3, 5, 7...] * K[2][1]
+ * R2[2, 4, 6, 8...] * K[2][2]
+ * ------------------------------
+ * Output[0, 1, 2, 3]
+ */
+template <>
+struct DepthwiseConvKernel<3, 2> {
+ static void run(const float* inputData,
+ const float* filterData,
+ int inputHeight,
+ int inputWidth,
+ int outputChannels,
+ int outputHeight,
+ int outputWidth,
+ int filterMultiplier,
+ float* outputData) {
+ const int steps = outputWidth >> 2;
+ const int remain = outputWidth & 3;
+ for (int c = 0; c < outputChannels; c++, filterData += 9) {
+ // Load the filters
+ float32x4_t k[3];
+ k[0] = vld1q_f32(filterData);
+ k[1] = vld1q_f32(filterData + 3);
+ k[2] = vld1q_f32(filterData + 6);
+ k[0] = vsetq_lane_f32(0.f, k[0], 3);
+ k[1] = vsetq_lane_f32(0.f, k[1], 3);
+ k[2] = vsetq_lane_f32(0.f, k[2], 3);
+
+ const float* start =
+ inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
+ float32x4_t input[3][3];
+ for (int h = 0; h < outputHeight; h++) {
+ const float* r0 = start + 2 * h * inputWidth;
+ const float* r1 = start + (2 * h + 1) * inputWidth;
+ const float* r2 = start + (2 * h + 2) * inputWidth;
+ for (int s = 0; s < steps; s++) {
+ // Load the inputs
+ float32x4_t data1;
+ float32x4x2_t data2;
+
+ data2 = vld2q_f32(r0);
+ input[0][0] = data2.val[0];
+ input[0][1] = data2.val[1];
+ data1 = vld1q_f32(r0 + 8);
+ input[0][2] = vextq_f32(data2.val[0], data1, 1);
+
+ data2 = vld2q_f32(r1);
+ input[1][0] = data2.val[0];
+ input[1][1] = data2.val[1];
+ data1 = vld1q_f32(r1 + 8);
+ input[1][2] = vextq_f32(data2.val[0], data1, 1);
+
+ data2 = vld2q_f32(r2);
+ input[2][0] = data2.val[0];
+ input[2][1] = data2.val[1];
+ data1 = vld1q_f32(r2 + 8);
+ input[2][2] = vextq_f32(data2.val[0], data1, 1);
+
+ float32x4_t tmp1 = vdupq_n_f32(0.f);
+ float32x4_t tmp2 = vdupq_n_f32(0.f);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
+ tmp1 = vaddq_f32(tmp1, tmp2);
+
+ vst1q_f32(outputData, tmp1);
+ r0 += 8;
+ r1 += 8;
+ r2 += 8;
+ outputData += 4;
+ }
+
+ for (int r = 0; r < remain; r++) {
+ float32x4_t i0 = vld1q_f32(r0);
+ float32x4_t i1 = vld1q_f32(r1);
+ float32x4_t i2 = vld1q_f32(r2);
+ *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
+ r0 += 2;
+ r1 += 2;
+ r2 += 2;
+ outputData++;
+ }
+ }
+ }
+ }
+};
+
+/**
+ * Each step calculates four elements of the output.
+ */
+template <>
+struct DepthwiseConvKernel<4, 1> {
+ static void run(const float* inputData,
+ const float* filterData,
+ int inputHeight,
+ int inputWidth,
+ int outputChannels,
+ int outputHeight,
+ int outputWidth,
+ int filterMultiplier,
+ float* outputData) {
+ const int steps = outputWidth >> 2;
+ const int remain = outputWidth & 3;
+ for (int c = 0; c < outputChannels; c++, filterData += 16) {
+ // Load the filters
+ float32x4_t k[4];
+ k[0] = vld1q_f32(filterData);
+ k[1] = vld1q_f32(filterData + 4);
+ k[2] = vld1q_f32(filterData + 8);
+ k[3] = vld1q_f32(filterData + 12);
+
+ const float* r0 =
+ inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
+ const float* r1 = r0 + inputWidth;
+ const float* r2 = r0 + inputWidth * 2;
+ const float* r3 = r0 + inputWidth * 3;
+ float32x4_t input[4][4];
+ for (int h = 0; h < outputHeight; h++) {
+ for (int s = 0; s < steps; s++) {
+ // Load the inputs
+ float32x4_t tmp;
+ input[0][0] = vld1q_f32(r0);
+ tmp = vld1q_f32(r0 + 4);
+ input[0][1] = vextq_f32(input[0][0], tmp, 1);
+ input[0][2] = vextq_f32(input[0][0], tmp, 2);
+ input[0][3] = vextq_f32(input[0][0], tmp, 3);
+
+ input[1][0] = vld1q_f32(r1);
+ tmp = vld1q_f32(r1 + 4);
+ input[1][1] = vextq_f32(input[1][0], tmp, 1);
+ input[1][2] = vextq_f32(input[1][0], tmp, 2);
+ input[1][3] = vextq_f32(input[1][0], tmp, 3);
+
+ input[2][0] = vld1q_f32(r2);
+ tmp = vld1q_f32(r2 + 4);
+ input[2][1] = vextq_f32(input[2][0], tmp, 1);
+ input[2][2] = vextq_f32(input[2][0], tmp, 2);
+ input[2][3] = vextq_f32(input[2][0], tmp, 3);
+
+ input[3][0] = vld1q_f32(r3);
+ tmp = vld1q_f32(r3 + 4);
+ input[3][1] = vextq_f32(input[3][0], tmp, 1);
+ input[3][2] = vextq_f32(input[3][0], tmp, 2);
+ input[3][3] = vextq_f32(input[3][0], tmp, 3);
+
+ float32x4_t tmp1 = vdupq_n_f32(0.f);
+ float32x4_t tmp2 = vdupq_n_f32(0.f);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
+ tmp1 = vaddq_f32(tmp1, tmp2);
+
+ vst1q_f32(outputData, tmp1);
+ r0 += 4;
+ r1 += 4;
+ r2 += 4;
+ r3 += 4;
+ outputData += 4;
+ }
+
+ for (int r = 0; r < remain; r++) {
+ float32x4_t i0 = vld1q_f32(r0);
+ float32x4_t i1 = vld1q_f32(r1);
+ float32x4_t i2 = vld1q_f32(r2);
+ float32x4_t i3 = vld1q_f32(r3);
+ *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
+ r0++;
+ r1++;
+ r2++;
+ r3++;
+ outputData++;
+ }
+
+ r0 += 3;
+ r1 += 3;
+ r2 += 3;
+ r3 += 3;
+ }
+ }
+ }
+};
+
+/**
+ * Each step calculates four elements of the output.
+ */
+template <>
+struct DepthwiseConvKernel<4, 2> {
+ static void run(const float* inputData,
+ const float* filterData,
+ int inputHeight,
+ int inputWidth,
+ int outputChannels,
+ int outputHeight,
+ int outputWidth,
+ int filterMultiplier,
+ float* outputData) {
+ const int steps = outputWidth >> 2;
+ const int remain = outputWidth & 3;
+ for (int c = 0; c < outputChannels; c++, filterData += 16) {
+ // Load the filters
+ float32x4_t k[4];
+ k[0] = vld1q_f32(filterData);
+ k[1] = vld1q_f32(filterData + 4);
+ k[2] = vld1q_f32(filterData + 8);
+ k[3] = vld1q_f32(filterData + 12);
+
+ const float* start =
+ inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
+ float32x4_t input[4][4];
+ for (int h = 0; h < outputHeight; h++) {
+ const float* r0 = start + 2 * h * inputWidth;
+ const float* r1 = start + (2 * h + 1) * inputWidth;
+ const float* r2 = start + (2 * h + 2) * inputWidth;
+ const float* r3 = start + (2 * h + 3) * inputWidth;
+ for (int s = 0; s < steps; s++) {
+ // Load the inputs
+ float32x4x2_t data1;
+ float32x4x2_t data2;
+
+ data1 = vld2q_f32(r0);
+ data2 = vld2q_f32(r0 + 8);
+ input[0][0] = data1.val[0];
+ input[0][1] = data1.val[1];
+ input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1);
+ input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1);
+
+ data1 = vld2q_f32(r1);
+ data2 = vld2q_f32(r1 + 8);
+ input[1][0] = data1.val[0];
+ input[1][1] = data1.val[1];
+ input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1);
+ input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1);
+
+ data1 = vld2q_f32(r2);
+ data2 = vld2q_f32(r2 + 8);
+ input[2][0] = data1.val[0];
+ input[2][1] = data1.val[1];
+ input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1);
+ input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1);
+
+ data1 = vld2q_f32(r3);
+ data2 = vld2q_f32(r3 + 8);
+ input[3][0] = data1.val[0];
+ input[3][1] = data1.val[1];
+ input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1);
+ input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1);
+
+ float32x4_t tmp1 = vdupq_n_f32(0.f);
+ float32x4_t tmp2 = vdupq_n_f32(0.f);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
+ tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
+ tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
+ tmp1 = vaddq_f32(tmp1, tmp2);
+
+ vst1q_f32(outputData, tmp1);
+ r0 += 8;
+ r1 += 8;
+ r2 += 8;
+ r3 += 8;
+ outputData += 4;
+ }
+
+ for (int r = 0; r < remain; r++) {
+ float32x4_t i0 = vld1q_f32(r0);
+ float32x4_t i1 = vld1q_f32(r1);
+ float32x4_t i2 = vld1q_f32(r2);
+ float32x4_t i3 = vld1q_f32(r3);
+ *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
+ r0 += 2;
+ r1 += 2;
+ r2 += 2;
+ r3 += 2;
+ outputData++;
+ }
+ }
+ }
+ }
+};
+
+template <class T>
+struct Padding {
+ static void run(const T* input,
+ T* inputPadding,
+ int channels,
+ int inputHeight,
+ int inputWidth,
+ int padInputHeight,
+ int padInputWidth) {
+ const int paddingHeight = (padInputHeight - inputHeight) / 2;
+ const int paddingWidth = (padInputWidth - inputWidth) / 2;
+ for (int c = 0; c < channels; c++) {
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+
+ for (int i = 0; i < inputHeight; i++) {
+ // padding head
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = T(0);
+ }
+
+ memcpy(inputPadding, input, inputWidth * sizeof(T));
+ inputPadding += inputWidth;
+ input += inputWidth;
+
+ // padding tail
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = T(0);
+ }
+ }
+
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+ }
+ }
+};
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+template <>
+struct Padding<float> {
+ static void run(const float* input,
+ float* inputPadding,
+ int channels,
+ int inputHeight,
+ int inputWidth,
+ int padInputHeight,
+ int padInputWidth) {
+ const int paddingHeight = (padInputHeight - inputHeight) / 2;
+ const int paddingWidth = (padInputWidth - inputWidth) / 2;
+ for (int c = 0; c < channels; c++) {
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+
+ for (int i = 0; i < inputHeight; i++) {
+ // padding head
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = float(0);
+ }
+
+ int step = inputWidth >> 2;
+ int remain = inputWidth & 3;
+ for (int s = 0; s < step; s++) {
+ float32x4_t s0 = vld1q_f32(input);
+ vst1q_f32(inputPadding, s0);
+ input += 4;
+ inputPadding += 4;
+ }
+ for (int r = 0; r < remain; r++) {
+ *inputPadding++ = *input++;
+ }
+
+ // padding tail
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = float(0);
+ }
+ }
+
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+ }
+ }
+};
+
+// for stride is 2
+struct StridePadding {
+ static void run(const float* input,
+ float* inputPadding,
+ int channels,
+ int inputHeight,
+ int inputWidth,
+ int padInputHeight,
+ int padInputWidth) {
+ const int paddingHeight = (padInputHeight - (inputHeight * 2 - 1)) / 2;
+ const int paddingWidth = (padInputWidth - (inputWidth * 2 - 1)) / 2;
+ for (int c = 0; c < channels; c++) {
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+
+ for (int i = 0; i < inputHeight; i++) {
+ // padding head
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = float(0);
+ }
+
+ int step = inputWidth >> 2;
+ int remain = inputWidth & 3;
+ float32x4_t s1 = vdupq_n_f32(0.f);
+ for (int s = 0; s < step; s++) {
+ float32x4_t s0 = vld1q_f32(input);
+ float32x4x2_t v = {{s0, s1}};
+ vst2q_f32(inputPadding, v);
+ input += 4;
+ inputPadding += 8;
+ }
+ for (int r = 0; r < remain; r++) {
+ *inputPadding++ = *input++;
+ *inputPadding++ = float(0);
+ }
+ inputPadding--;
+
+ // padding tail
+ for (int j = 0; j < paddingWidth; j++) {
+ *inputPadding++ = float(0);
+ }
+ if (i != inputHeight - 1) {
+ memset(inputPadding, 0, padInputWidth * sizeof(float));
+ inputPadding += padInputWidth;
+ }
+ }
+
+ if (paddingHeight > 0) {
+ memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
+ inputPadding += padInputWidth * paddingHeight;
+ }
+ }
+ }
+};
+
+#endif
+
+#endif
+
+} // namespace neon
+} // namespace paddle
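
Each step of the DepthwiseConvKernel<3, 1> specialization above produces four
neighboring outputs of one row, as its block comment describes; the NEON code
assembles the shifted rows with vld1q_f32/vextq_f32 and accumulates them with
vmlaq_laneq_f32. A minimal scalar sketch of that single step, with illustrative
names and no NEON, just to make the access pattern explicit:

#include <cstdio>

// Scalar equivalent of one step of the 3x3, stride-1 depthwise kernel:
// four adjacent outputs of one row from three input rows and a 3x3 filter.
void depthwise3x3Step(const float* r0, const float* r1, const float* r2,
                      const float k[9], float* out) {
  for (int i = 0; i < 4; ++i) {    // Output[0, 1, 2, 3]
    float sum = 0.f;
    for (int j = 0; j < 3; ++j) {  // rows shifted by 0, 1, 2 columns
      sum += r0[i + j] * k[0 * 3 + j] + r1[i + j] * k[1 * 3 + j] +
             r2[i + j] * k[2 * 3 + j];
    }
    out[i] = sum;
  }
}

int main() {
  float r0[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  float r1[8] = {1, 1, 1, 1, 1, 1, 1, 1};
  float r2[8] = {0, 0, 0, 0, 0, 0, 0, 0};
  float k[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};  // picks r0[i], r1[i+1], r2[i+2]
  float out[4];
  depthwise3x3Step(r0, r1, r2, k, out);
  for (int i = 0; i < 4; ++i) printf("%.1f ", out[i]);  // 2.0 3.0 4.0 5.0
  printf("\n");
  return 0;
}

The stride-2 specialization follows the same scheme but first de-interleaves
even and odd columns with vld2q_f32, so each step still emits four outputs.
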
diff --git a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..49ca4bc8a0947ba329bd991e9f7d001623901a67
--- /dev/null
+++ b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp
@@ -0,0 +1,136 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "NeonDepthwiseConv.h"
+#include "paddle/function/ConvOp.h"
+
+namespace paddle {
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+
+template <DeviceType Device>
+class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase {
+public:
+ void init(const FuncConfig& config) override {
+ ConvFunctionBase::init(config);
+ }
+
+ void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ const TensorShape& input = inputs[0].shape();
+ const TensorShape& filter = inputs[1].shape();
+ const TensorShape& output = outputs[0].shape();
+ checkShape(input, filter, output);
+ }
+
+ void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
+ CHECK_EQ(numInputs_, inputs.size());
+ CHECK_EQ(numOutputs_, outputs.size());
+ check(inputs, outputs);
+
+ const TensorShape& input = inputs[0].shape();
+ const TensorShape& filter = inputs[1].shape();
+ const TensorShape& output = outputs[0].shape();
+
+ int batchSize = input[0];
+ int inputChannels = input[1];
+ int inputHeight = input[2];
+ int inputWidth = input[3];
+ int filterHeight = getFilterHeight(filter);
+ int filterWidth = getFilterWidth(filter);
+ int outputChannels = output[1];
+ int outputHeight = output[2];
+ int outputWidth = output[3];
+ int filterMultiplier = outputChannels / groups_;
+ CHECK_EQ(inputChannels, groups_);
+
+ // only support strideH() == strideW() and filterHeight == filterWidth.
+ CHECK_EQ(strideH(), strideW());
+ CHECK_EQ(paddingH(), paddingW());
+ CHECK_EQ(filterHeight, filterWidth);
+
+ float* inputData = inputs[0].data();
+ float* filterData = inputs[1].data();
+ float* outputData = outputs[0].data();
+
+ // padding the input, input -> inputPadding
+ float* inputPadding = inputData;
+ int padInputHeight =
+ (inputHeight - 1) * strideH() + 2 * filterHeight - 1 - 2 * paddingH();
+ int padInputWidth =
+ (inputWidth - 1) * strideW() + 2 * filterWidth - 1 - 2 * paddingW();
+
+ if (padInputHeight > inputHeight || padInputWidth > inputWidth) {
+ int newSize = batchSize * inputChannels * padInputHeight * padInputWidth;
+      resizeBuffer<Device>(newSize);
+      inputPadding = reinterpret_cast<float*>(memory_->getBuf());
+ if (strideH() == 1) {
+        neon::Padding<float>::run(inputData,
+ inputPadding,
+ batchSize * inputChannels,
+ inputHeight,
+ inputWidth,
+ padInputHeight,
+ padInputWidth);
+ } else if (strideH() == 2) {
+ neon::StridePadding::run(inputData,
+ inputPadding,
+ batchSize * inputChannels,
+ inputHeight,
+ inputWidth,
+ padInputHeight,
+ padInputWidth);
+ } else {
+ LOG(FATAL) << "Not supported";
+ }
+ }
+
+  std::function<void(
+      const float*, const float*, int, int, int, int, int, int, float*)>
+      DepthWiseConv;
+
+ if (filterWidth == 3) {
+ DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run;
+ } else if (filterWidth == 4) {
+ DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run;
+ } else {
+ LOG(FATAL) << "Not supported";
+ }
+
+ for (int i = 0; i < batchSize; i++) {
+ DepthWiseConv(inputPadding,
+ filterData,
+ padInputHeight,
+ padInputWidth,
+ outputChannels,
+ outputHeight,
+ outputWidth,
+ filterMultiplier,
+ outputData);
+ inputPadding += inputChannels * padInputHeight * padInputWidth;
+ outputData += outputChannels * outputHeight * outputWidth;
+ }
+ }
+};
+
+#ifndef PADDLE_TYPE_DOUBLE
+
+REGISTER_TYPED_FUNC(NeonDepthwiseConvTranspose,
+ CPU,
+ NeonDepthwiseConvTransposeFunction);
+
+#endif
+
+#endif
+
+} // namespace paddle
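
The transposed-convolution path above needs no dedicated kernel: it expands the
input to padInputHeight x padInputWidth, interleaving zero rows and columns via
StridePadding when the stride is 2, and then reuses the stride-1
DepthwiseConvKernel. A small standalone check of that size bookkeeping, using
plain helpers with illustrative names rather than the Paddle API:

#include <cassert>

// Size of the zero-interleaved, zero-padded buffer built above (square case).
int dilatedPaddedSize(int in, int stride, int filter, int pad) {
  return (in - 1) * stride + 2 * filter - 1 - 2 * pad;
}

// A plain stride-1 convolution over that buffer yields the usual
// transposed-convolution output size: (in - 1) * stride + filter - 2 * pad.
int transposedOutputSize(int in, int stride, int filter, int pad) {
  return dilatedPaddedSize(in, stride, filter, pad) - filter + 1;
}

int main() {
  // e.g. a 4x4 input, 3x3 filter, stride 2, padding 1:
  assert(dilatedPaddedSize(4, 2, 3, 1) == 9);     // 9x9 expanded buffer
  assert(transposedOutputSize(4, 2, 3, 1) == 7);  // 7x7 deconv output
  return 0;
}
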
diff --git a/paddle/function/neon/neon_util.h b/paddle/function/neon/neon_util.h
index 56b3febe2d27bb4fbf57e49079b3ad071d556914..e2db0450675084345ad55559d8988c5375801cc9 100644
--- a/paddle/function/neon/neon_util.h
+++ b/paddle/function/neon/neon_util.h
@@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) {
return vget_lane_f32(vpadd_f32(v, v), 0);
}
-inline float32x4_t vmlaq_laneq_f32(float32x4_t a,
- float32x4_t b,
- float32x4_t v,
- const int lane) {
- return vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane));
-}
+#define vmlaq_laneq_f32(a, b, v, lane) \
+ vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane))
#endif
} // namespace neon
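
The inline wrapper is replaced by a macro presumably because vgetq_lane_f32
needs its lane argument to be a compile-time constant; once the lane has passed
through a function parameter, ARMv7 toolchains may refuse to treat it as an
immediate. A tiny ARM-only sketch of the fallback's effect (a + b * v[lane] per
element), assuming a NEON-capable compiler and using a renamed macro so it does
not collide with the AArch64 intrinsic:

#include <arm_neon.h>
#include <cstdio>

// Same shape as the fallback above; the lane stays a literal at the call site.
#define vmlaq_laneq_f32_compat(a, b, v, lane) \
  vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane))

int main() {
  const float vals[4] = {10.f, 20.f, 30.f, 40.f};
  float32x4_t a = vdupq_n_f32(1.f);
  float32x4_t b = vdupq_n_f32(2.f);
  float32x4_t v = vld1q_f32(vals);
  float32x4_t r = vmlaq_laneq_f32_compat(a, b, v, 2);  // 1 + 2 * 30 = 61
  printf("%f\n", vgetq_lane_f32(r, 0));
  return 0;
}
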
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp
index 1ceaaaa206ee3cbc5421238574c7f310011ccaa5..f7a80e23e1bd49549bec57b360587adc6b423794 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.cpp
+++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp
@@ -62,14 +62,18 @@ void BatchNormBaseLayer::calFeatureMapSize() {
const ImageConfig& conf = config_.inputs(0).image_conf();
imageH_ = inputLayers_[0]->getOutput().getFrameHeight();
imageW_ = inputLayers_[0]->getOutput().getFrameWidth();
+ imageD_ = inputLayers_[0]->getOutput().getFrameDepth();
+
+ if (0 == imageD_) imageD_ = conf.img_size_z();
if (imageH_ == 0 && imageW_ == 0) {
imageH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
imageW_ = conf.img_size();
} else {
getOutput().setFrameHeight(imageH_);
getOutput().setFrameWidth(imageW_);
+ getOutput().setFrameDepth(imageD_);
}
- imgPixels_ = imageH_ * imageW_;
+ imgPixels_ = imageH_ * imageW_ * imageD_;
}
} // namespace paddle
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/gserver/layers/BatchNormBaseLayer.h
index 230bafc31d96bbd49481a7ed135be6888688627e..e721d2d267a31cae46407673b8b1281e87055608 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.h
+++ b/paddle/gserver/layers/BatchNormBaseLayer.h
@@ -80,6 +80,7 @@ protected:
/// Height or width of input image feature.
/// Both of them are 1 if the input is fully-connected layer.
+ int imageD_;
int imageH_;
int imageW_;
/// Height * Width.
diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
index 44ba2c4b7d1562d2ce839b5f4b4de1af35e6925f..49a9540c0b6e36b59ed786287ff5c4569b69a6a5 100644
--- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp
+++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
@@ -37,7 +37,7 @@ bool CudnnBatchNormLayer::init(const LayerMap& layerMap,
}
void CudnnBatchNormLayer::reshape(int batchSize) {
- hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_, imageW_);
+ hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_ * imageD_, imageW_);
}
void CudnnBatchNormLayer::forward(PassType passType) {
@@ -104,7 +104,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
EPS,
batchSize,
channels_,
- imageH_,
+ imageH_ * imageD_,
imageW_);
}
}
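
Together with the BatchNormBaseLayer change, this reshape adds 3-D batch
normalization by folding depth into height: per-channel statistics reduce over
the batch and all spatial positions, so an NCDHW tensor can be described to
cuDNN as (N, C, D*H, W) without moving any data. A quick index-arithmetic check
of that view, with toy sizes and a contiguous layout assumed:

#include <cassert>

int offsetNCDHW(int n, int c, int d, int h, int w,
                int C, int D, int H, int W) {
  return (((n * C + c) * D + d) * H + h) * W + w;
}

int offsetNCHW(int n, int c, int h, int w, int C, int H, int W) {
  return ((n * C + c) * H + h) * W + w;
}

int main() {
  const int C = 3, D = 4, H = 5, W = 6;
  // Element (n, c, d, h, w) becomes row d * H + h of the folded (D*H, W) map,
  // so viewing the buffer as (N, C, D*H, W) reads the very same memory.
  assert(offsetNCDHW(1, 2, 3, 4, 5, C, D, H, W) ==
         offsetNCHW(1, 2, 3 * H + 4, 5, C, D * H, W));
  return 0;
}
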
diff --git a/paddle/gserver/layers/DeConv3DLayer.cpp b/paddle/gserver/layers/DeConv3DLayer.cpp
index 1b59ed60c57fe3bbfa814befa8a63408a2621715..3eea638649e8ebfdd7efa18615977a9e1344c695 100644
--- a/paddle/gserver/layers/DeConv3DLayer.cpp
+++ b/paddle/gserver/layers/DeConv3DLayer.cpp
@@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap,
size_t DeConv3DLayer::getSize() {
CHECK_NE(inputLayers_.size(), 0UL);
- outputH_.clear();
- outputW_.clear();
- outputD_.clear();
+ imgSizeW_.clear();
+ imgSizeH_.clear();
+ imgSizeD_.clear();
N_.clear();
NOut_.clear();
size_t layerSize = 0;
for (size_t i = 0; i < inputLayers_.size(); ++i) {
- outputW_.push_back(
- imageSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true));
- outputH_.push_back(imageSize(
- imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
- outputD_.push_back(imageSize(
- imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
- NOut_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
- N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
+ imgSizeW_.push_back(
+ imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i], true));
+ imgSizeH_.push_back(imageSize(
+ outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
+ imgSizeD_.push_back(imageSize(
+ outputD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
+ NOut_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
+ N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
layerSize += NOut_[i] * numFilters_;
}
- getOutput().setFrameHeight(outputH_[0]);
- getOutput().setFrameWidth(outputW_[0]);
- getOutput().setFrameDepth(outputD_[0]);
+ getOutput().setFrameHeight(imgSizeH_[0]);
+ getOutput().setFrameWidth(imgSizeW_[0]);
+ getOutput().setFrameDepth(imgSizeD_[0]);
return layerSize;
}
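
The renames in getSize() follow the convolution/deconvolution duality: for a
transposed convolution the layer's output plays the role of the convolution's
image, so the larger imgSize* values are now recovered from the configured
output* values through imageSize(). A sketch of that relation in caffe mode,
using local helpers that mirror the usual formulas rather than Paddle's own
functions:

#include <cassert>

int convOutputSize(int image, int filter, int pad, int stride) {
  return (image - filter + 2 * pad) / stride + 1;   // forward convolution
}

int deconvImageSize(int output, int filter, int pad, int stride) {
  return (output - 1) * stride + filter - 2 * pad;  // transposed convolution
}

int main() {
  const int filter = 4, pad = 1, stride = 2;
  const int image = 8;                                           // conv input
  const int small = convOutputSize(image, filter, pad, stride);  // 4
  assert(small == 4);
  assert(deconvImageSize(small, filter, pad, stride) == image);  // inverse
  return 0;
}
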
@@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) {
}
colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(),
numFilters_,
- outputD_[i],
- outputH_[i],
- outputW_[i],
+ imgSizeD_[i],
+ imgSizeH_[i],
+ imgSizeW_[i],
filterSizeZ_[i],
filterSizeY_[i],
filterSize_[i],
@@ -144,9 +144,9 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
colBuf_->vol2Col(
getOutputGrad()->getData() + n * getOutputGrad()->getStride(),
numFilters_,
- outputD_[i],
- outputH_[i],
- outputW_[i],
+ imgSizeD_[i],
+ imgSizeH_[i],
+ imgSizeW_[i],
filterSizeZ_[i],
filterSizeY_[i],
filterSize_[i],
diff --git a/paddle/gserver/layers/DetectionOutputLayer.cpp b/paddle/gserver/layers/DetectionOutputLayer.cpp
index 8ab838e191314ab25469631626c0b0564d7fffda..0cf0a92bf4bd8f9b8eba2016b2377d9dfb18c70a 100644
--- a/paddle/gserver/layers/DetectionOutputLayer.cpp
+++ b/paddle/gserver/layers/DetectionOutputLayer.cpp
@@ -139,7 +139,13 @@ void DetectionOutputLayer::forward(PassType passType) {
allDecodedBBoxes,
&allIndices);
- resetOutput(numKept, 7);
+ if (numKept > 0) {
+ resetOutput(numKept, 7);
+ } else {
+ MatrixPtr outV = getOutputValue();
+ outV = NULL;
+ return;
+ }
MatrixPtr outV = getOutputValue();
getDetectionOutput(confBuffer_->getData(),
numKept,
diff --git a/paddle/gserver/layers/DetectionUtil.cpp b/paddle/gserver/layers/DetectionUtil.cpp
index 3e61adc66e60c54250e4f323452aa13045310879..d83674f45a70212a8adc94a31ff58eb0e01baa00 100644
--- a/paddle/gserver/layers/DetectionUtil.cpp
+++ b/paddle/gserver/layers/DetectionUtil.cpp
@@ -469,7 +469,7 @@ size_t getDetectionIndices(
const size_t numClasses,
const size_t backgroundId,
const size_t batchSize,
- const size_t confThreshold,
+ const real confThreshold,
const size_t nmsTopK,
const real nmsThreshold,
const size_t keepTopK,
diff --git a/paddle/gserver/layers/DetectionUtil.h b/paddle/gserver/layers/DetectionUtil.h
index fe4f9f075e4cf011c97f68f49598a828d62327b3..641ed873b4c8645b6455e5ef5e63593e3005b770 100644
--- a/paddle/gserver/layers/DetectionUtil.h
+++ b/paddle/gserver/layers/DetectionUtil.h
@@ -275,7 +275,7 @@ size_t getDetectionIndices(
const size_t numClasses,
const size_t backgroundId,
const size_t batchSize,
- const size_t confThreshold,
+ const real confThreshold,
const size_t nmsTopK,
const real nmsThreshold,
const size_t keepTopK,
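
Changing confThreshold from size_t to real matters because a fractional
threshold such as 0.01, passed into a size_t parameter, is silently truncated
to zero, which effectively disables the confidence filter; presumably that is
the bug being fixed here. A two-line illustration:

#include <cassert>
#include <cstddef>

int main() {
  const float confThreshold = 0.01f;            // typical detection threshold
  const std::size_t truncated = confThreshold;  // old size_t parameter: 0
  assert(truncated == 0);                       // every box clears a 0 threshold
  const float kept = confThreshold;             // new real parameter keeps 0.01
  assert(kept > 0.f);
  return 0;
}
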
diff --git a/paddle/gserver/layers/GruCompute.cpp b/paddle/gserver/layers/GruCompute.cpp
index 06907768e98f4bad952706cffbbd65d1f86cc6df..148516391c6cad8feff34b9bd1c10c27d1a8a0e6 100644
--- a/paddle/gserver/layers/GruCompute.cpp
+++ b/paddle/gserver/layers/GruCompute.cpp
@@ -14,6 +14,7 @@ limitations under the License. */
#include "GruCompute.h"
#include "hl_recurrent_apply.cuh"
+#include "paddle/function/GruFunctor.h"
#include "paddle/utils/Util.h"
namespace paddle {
@@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) {
template <>
void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) {
- hl_cpu_gru_forward(hppl::forward::gru_resetOutput(),
- hppl::forward::gru_finalOutput(),
- value,
- frameSize,
- batchSize,
- activeNode_,
- activeGate_);
+  GruFunctor<DEVICE_TYPE_CPU, real>::compute(hppl::forward::gru_resetOutput(),
+ hppl::forward::gru_finalOutput(),
+ value,
+ frameSize,
+ batchSize,
+ activeNode_,
+ activeGate_);
}
template <>
@@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value,
hl_gru_grad grad,
int frameSize,
int batchSize) {
- hl_cpu_gru_backward(hppl::backward::gru_stateGrad(),
- hppl::backward::gru_resetGrad(),
- value,
- grad,
- frameSize,
- batchSize,
- activeNode_,
- activeGate_);
+  GruGradFunctor<DEVICE_TYPE_CPU, real>::compute(
+ hppl::backward::gru_stateGrad(),
+ hppl::backward::gru_resetGrad(),
+ value,
+ grad,
+ frameSize,
+ batchSize,
+ activeNode_,
+ activeGate_);
}
} // namespace paddle
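
Replacing hl_cpu_gru_forward/backward with GruFunctor/GruGradFunctor puts the
CPU GRU kernels behind functor templates, so the same call site can later
dispatch to other devices or to vectorized specializations. A minimal sketch of
that functor-dispatch pattern with a toy operation and made-up names (not the
GRU math and not Paddle's actual functors):

#include <cstdio>

enum ToyDevice { TOY_CPU, TOY_GPU };

template <ToyDevice Device, class T>
struct AxpyFunctor;  // primary template intentionally left undefined

template <class T>
struct AxpyFunctor<TOY_CPU, T> {
  // y[i] += a * x[i]; a GPU specialization could sit beside it unchanged.
  static void compute(T a, const T* x, T* y, int n) {
    for (int i = 0; i < n; ++i) y[i] += a * x[i];
  }
};

int main() {
  float x[3] = {1, 2, 3}, y[3] = {10, 10, 10};
  AxpyFunctor<TOY_CPU, float>::compute(2.f, x, y, 3);
  printf("%g %g %g\n", y[0], y[1], y[2]);  // 12 14 16
  return 0;
}
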
diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h
index edef36194aabdb9c122ec3423deb036169a34d7c..4002a3d0747a86ab7b495ffe52247521831b71b8 100644
--- a/paddle/gserver/layers/Layer.h
+++ b/paddle/gserver/layers/Layer.h
@@ -49,6 +49,12 @@ struct LayerState {
};
typedef std::shared_ptr<LayerState> LayerStatePtr;
+/// Paddle device ID, MKLDNN is -2, CPU is -1
+enum PADDLE_DEVICE_ID {
+ MKLDNN_DEVICE = -2,
+ CPU_DEVICE = -1,
+};
+
/**
* @brief Base class for layer.
* Define necessary variables and functions for every layer.
@@ -59,11 +65,6 @@ protected:
LayerConfig config_;
/// whether to use GPU
bool useGpu_;
- /// Paddle device ID, MKLDNN is -2, CPU is -1
- enum PADDLE_DEVICE_ID {
- MKLDNN_DEVICE = -2,
- CPU_DEVICE = -1,
- };
/// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ...
int deviceId_;
/// Input layers
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f8c06c5f868f8d48a9a222b92315ee0ef2cf265e
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp
@@ -0,0 +1,543 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "MKLDNNConvLayer.h"
+#include "paddle/math/MathUtils.h"
+#include "paddle/utils/Logging.h"
+
+using namespace mkldnn; // NOLINT
+typedef memory::format format;
+
+namespace paddle {
+
+REGISTER_LAYER(mkldnn_conv, MKLDNNConvLayer);
+
+bool MKLDNNConvLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ if (!MKLDNNLayer::init(layerMap, parameterMap)) {
+ return false;
+ }
+ CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet";
+ CHECK_EQ(inputLayers_.size(), parameters_.size());
+ CHECK(config_.shared_biases()) << "Only support shared biases yet";
+
+ oc_ = config_.num_filters();
+ const ConvConfig& conf = config_.inputs(0).conv_conf();
+ ic_ = conf.channels();
+ fw_ = conf.filter_size();
+ fh_ = conf.filter_size_y();
+ pw_ = conf.padding();
+ ph_ = conf.padding_y();
+ dw_ = conf.dilation();
+ dh_ = conf.dilation_y();
+ sw_ = conf.stride();
+ sh_ = conf.stride_y();
+ gp_ = conf.groups();
+ oh_ = conf.output_y();
+ ow_ = conf.output_x();
+ ih_ = conf.img_size_y();
+ iw_ = conf.img_size();
+ caffeMode_ = conf.caffe_mode();
+ CHECK(caffeMode_) << "Only support caffe mode yet";
+ CHECK(dh_ == 1 && dw_ == 1) << "Only support dilation 1 yet";
+ // check group setting
+ CHECK_EQ((oc_ / gp_) * gp_, oc_) << "group is indivisible for oc";
+ CHECK_EQ((ic_ / gp_) * gp_, ic_) << "group is indivisible for ic";
+
+ // create weight
+ size_t height = oc_ / gp_;
+ size_t width = ic_ * fh_ * fw_;
+ CHECK_EQ(parameters_[0]->getSize(), height * width);
+ weight_ =
+      std::unique_ptr<Weight>(new Weight(height, width, parameters_[0], 0));
+
+ // create biases
+ if (biasParameter_.get() != NULL) {
+    biases_ = std::unique_ptr<Weight>(new Weight(1, oc_, biasParameter_));
+ }
+ return true;
+}
+
+void MKLDNNConvLayer::convertWeightsFromPaddle() {
+ if (hasInitedWgt_) {
+ return;
+ }
+
+ CHECK(wgtVal_) << "should have been initialized";
+ // the paddle weight format is oihw or goihw
+ auto targetDim = wgtVal_->getDims();
+ auto srcFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
+ wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
+ hasInitedWgt_ = true;
+}
+
+void MKLDNNConvLayer::convertWeightsToPaddle() {
+ CHECK(wgtVal_) << "should have been initialized";
+ auto targetDim = wgtVal_->getDims();
+ auto dstFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
+ wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
+}
+
+void MKLDNNConvLayer::reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
+ reshapeInput(bs, ih, iw);
+
+ // cal output sizes
+ // oc can not be changed
+ int fh = (fh_ - 1) * dh_ + 1;
+ int fw = (fw_ - 1) * dw_ + 1;
+ oh = outputSize(ih, fh, ph_, sh_, caffeMode_);
+ ow = outputSize(iw, fw, pw_, sw_, caffeMode_);
+
+ reshapeOutput(oh, ow);
+ resizeOutput(bs, oc * oh * ow);
+
+ printSizeInfo();
+}
+
+void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetFwdPD(fwdPD_);
+
+ resetFwdBuffers(fwdPD_, in, wgt, bias, out);
+
+ resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
+
+ printValueFormatFlow();
+}
+
+void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+  std::shared_ptr<conv_bwdWgt::primitive_desc> bwdWgtPD;
+  std::shared_ptr<conv_bwdData::primitive_desc> bwdDataPD;
+
+ resetBwdWgtPD(bwdWgtPD);
+
+ resetBwdDataPD(bwdDataPD);
+
+ resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+ resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+ printGradFormatFlow();
+}
+
+void MKLDNNConvLayer::updateInputData() {
+ cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) {
+ weight_->getParameterPtr()->incUpdate(callback);
+ if (biases_ && biases_->getWGrad()) {
+ biases_->getParameterPtr()->incUpdate(callback);
+ }
+}
+
+void MKLDNNConvLayer::loadConvSettings(memory::dims& wgt,
+ memory::dims& bias,
+ memory::dims& stride,
+ memory::dims& dilation,
+ memory::dims& padL,
+ memory::dims& padR) {
+ wgt = (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_}
+ : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_};
+ bias = memory::dims{oc_};
+ stride = memory::dims{sh_, sw_};
+ padL = memory::dims{ph_, pw_};
+ padR = getPaddingR();
+ // note: mkldnn dilation start from 0
+ dilation = memory::dims{dh_ - 1, dw_ - 1};
+}
+
+void MKLDNNConvLayer::resetFwdPD(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd) {
+ // dims for conv
+ memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+
+ prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
+ : prop_kind::forward_training;
+ algorithm algo = algorithm::convolution_direct;
+ padding_kind padKind = padding_kind::zero;
+ conv_fwd::desc fwdDesc =
+ biases_ && biases_->getW()
+ ? conv_fwd::desc(pk,
+ algo,
+ MKLDNNMatrix::createMemoryDesc(inDims),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ MKLDNNMatrix::createMemoryDesc(biasDims),
+ MKLDNNMatrix::createMemoryDesc(outDims),
+ strides,
+ dilations,
+ padL,
+ padR,
+ padKind)
+ : conv_fwd::desc(pk,
+ algo,
+ MKLDNNMatrix::createMemoryDesc(inDims),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ MKLDNNMatrix::createMemoryDesc(outDims),
+ strides,
+ dilations,
+ padL,
+ padR,
+ padKind);
+ pd.reset(new conv_fwd::primitive_desc(fwdDesc, engine_));
+}
+
+void MKLDNNConvLayer::resetFwdBuffers(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ CHECK(pd);
+ resetInValue(pd, in);
+
+ resetWgtBiasValue(pd, wgt, bias);
+
+ resetOutValue(pd, out);
+}
+
+void MKLDNNConvLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+
+ if (cvtInVal_) {
+ pipeline.push_back(*cvtInVal_);
+ }
+
+ if (bias) {
+ fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out));
+ } else {
+ fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out));
+ }
+ pipeline.push_back(*fwd_);
+
+ if (cvtOutVal_) {
+ pipeline.push_back(*cvtOutVal_);
+ }
+}
+
+void MKLDNNConvLayer::resetInValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& in) {
+ const MatrixPtr& inMat = inputLayers_[0]->getOutput().value;
+ in = MKLDNNMatrix::create(inMat, pd->src_primitive_desc());
+
+  // create buffer and reorder if the input value does not match
+ cpuInVal_ = nullptr;
+ cvtInVal_ = nullptr;
+ if (inputIsOnlyMKLDNN()) {
+    MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
+ CHECK(dnnIn) << "Input should be MKLDNNMatrix";
+ if (dnnIn->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ CHECK_EQ(dnnIn->getFormat(), format::nc);
+ CHECK(ih_ == 1 && iw_ == 1) << "when input is nc format";
+ // create a new one with nchw format and same data
+ memory::dims inDims = memory::dims{bs_, ic_, 1, 1};
+ dnnIn = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_);
+ CHECK(dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc());
+ }
+ in = dnnIn;
+ } else {
+ const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
+ memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
+ cpuInVal_ = MKLDNNMatrix::create(cpuIn, inDims, format::nchw, engine_);
+ if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ // create new mkldnn matrix
+ in = MKLDNNMatrix::create(nullptr, pd->src_primitive_desc());
+ cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in);
+      CHECK(cvtInVal_) << "should not be empty";
+ } else {
+ in = cpuInVal_;
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtBiasValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
+ VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();
+
+ bias = nullptr;
+ if (biases_ && biases_->getW()) {
+ bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc());
+ }
+}
+
+void MKLDNNConvLayer::resetOutValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& out) {
+ out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc());
+
+ // change original output value from cpu matrix to mkldnn matrix
+  output_.value = std::dynamic_pointer_cast<Matrix>(out);
+
+  // create a reorder if the output value has a CPU-device user and the
+  // primitive descs do not match
+  cpuOutVal_ = nullptr;
+  cvtOutVal_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
+ if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
+ cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
+      CHECK(cvtOutVal_) << "should not be empty";
+ } else {
+      // CPU output shares the same data as the MKLDNN output
+ cpuOut->setData(out->getData());
+ cpuOutVal_ = out;
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetBwdWgtPD(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& pd) {
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+
+ // create backward weight using input, output and weight value memory desc
+ CHECK(inVal_) << "Should have input value";
+ CHECK(outVal_) << "Should have output value";
+ CHECK(wgtVal_) << "Should have weight value";
+ algorithm algo = algorithm::convolution_direct;
+ padding_kind padKind = padding_kind::zero;
+ auto bwdWgtDesc = biasVal_ != nullptr
+ ? conv_bwdWgt::desc(algo,
+ inVal_->getMemoryDesc(),
+ wgtVal_->getMemoryDesc(),
+ biasVal_->getMemoryDesc(),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padKind)
+ : conv_bwdWgt::desc(algo,
+ inVal_->getMemoryDesc(),
+ wgtVal_->getMemoryDesc(),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padKind);
+ pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
+ CHECK(pd->src_primitive_desc() == inVal_->getPrimitiveDesc())
+ << "primitive desc of in value should equal";
+ CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
+ << "primitive desc of out grad should equal the out value";
+ CHECK(pd->diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc())
+ << "primitive desc of weight grad should equal the weight value";
+}
+
+void MKLDNNConvLayer::resetBwdDataPD(
+    std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
+ if (inputLayers_[0]->getOutput().grad == nullptr) {
+ return;
+ }
+
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+ CHECK(inVal_) << "Should have input value";
+ CHECK(outVal_) << "Should have output value";
+ // create backward data using input and output value memory desc
+ // but using weight memory desc with any format
+ auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
+ inVal_->getMemoryDesc(),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padding_kind::zero);
+ pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
+ CHECK(pd->diff_src_primitive_desc() == inVal_->getPrimitiveDesc())
+ << "primitive desc of in grad should equal the in value";
+ CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
+ << "primitive desc of out grad should equal";
+}
+
+void MKLDNNConvLayer::resetBwdBuffers(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ CHECK(wgtPD);
+ resetOutGrad(wgtPD, out);
+
+ resetWgtBiasGrad(wgtPD, wgt, bias);
+
+ resetInGrad(dataPD, in);
+
+ resetWgtValBwdData(dataPD, wgtValBwdData_);
+}
+
+void MKLDNNConvLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+
+ if (cvtOutGrad_) {
+ pipeline.push_back(*cvtOutGrad_);
+ }
+
+ // add bwdWgt handle
+ if (bias) {
+ bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias));
+ } else {
+ bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt));
+ }
+ pipeline.push_back(*bwdWgt_);
+
+ if (dataPD == nullptr) {
+ return;
+ }
+
+ if (cvtWgtVal_) {
+ pipeline.push_back(*cvtWgtVal_);
+ }
+
+ // add bwdData handle
+ CHECK(wgtValBwdData_) << "Should have weight memory";
+ bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in));
+ pipeline.push_back(*bwdData_);
+
+ if (cvtInGrad_) {
+ pipeline.push_back(*cvtInGrad_);
+ }
+}
+
+void MKLDNNConvLayer::resetOutGrad(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD, MKLDNNMatrixPtr& out) {
+ const MatrixPtr& outMat = output_.grad;
+ out = MKLDNNMatrix::create(outMat, wgtPD->diff_dst_primitive_desc());
+ CHECK(outVal_ != nullptr &&
+ out->getPrimitiveDesc() == outVal_->getPrimitiveDesc())
+ << "primitive desc of out grad and value should be equal";
+
+ // TODO(TJ): merge outgrad
+  // create a reorder if the CPU output grad does not match
+ cpuOutGrad_ = nullptr;
+ cvtOutGrad_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
+ // same PrimitiveDesc with cpuInVal_
+ CHECK(cpuOutVal_);
+ cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc());
+ if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) {
+ outMat->setData(cpuOut->getData());
+ out = cpuOutGrad_;
+ } else {
+ cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
+ CHECK(cvtOutGrad_);
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtBiasGrad(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ wgt = MKLDNNMatrix::create(weight_->getWGrad(),
+ wgtPD->diff_weights_primitive_desc());
+ CHECK(nullptr != wgtVal_ &&
+ wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc())
+ << "primitive desc of weight grad and value should be equal";
+ VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();
+
+ if (biasVal_ == nullptr) {
+ return;
+ }
+ bias = MKLDNNMatrix::create(biases_->getWGrad(),
+ wgtPD->diff_bias_primitive_desc());
+ CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc())
+ << "primitive desc of bias grad should equal the bias value";
+}
+
+void MKLDNNConvLayer::resetInGrad(
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in) {
+ if (dataPD == nullptr) {
+ return;
+ }
+
+ // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
+ in = MKLDNNMatrix::create(inputLayers_[0]->getOutput().grad,
+ dataPD->diff_src_primitive_desc());
+ CHECK(nullptr != inVal_ &&
+ in->getPrimitiveDesc() == inVal_->getPrimitiveDesc())
+ << "primitive desc of input grad and value should be equal";
+
+  // create a reorder if the CPU input grad does not match
+ cpuInGrad_ = nullptr;
+ cvtInGrad_ = nullptr;
+ if (!inputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE);
+ // same PrimitiveDesc with cpuInVal_
+ CHECK(cpuInVal_);
+ cpuInGrad_ = MKLDNNMatrix::create(cpuIn, cpuInVal_->getPrimitiveDesc());
+ if (cpuInGrad_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ const MatrixPtr& dnnIn = getInputGrad(0, MKLDNN_DEVICE);
+ in = MKLDNNMatrix::create(dnnIn, in->getPrimitiveDesc());
+ cvtInGrad_ = MKLDNNMatrix::createReorder(in, cpuInGrad_);
+ CHECK(cvtInGrad_);
+ } else {
+ in = cpuInGrad_;
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtValBwdData(
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& wgt) {
+ if (dataPD == nullptr) {
+ return;
+ }
+
+ // create new weight value for backward data, and create reorder if necessary
+ // since the primitive_desc would be different with wgtVal_
+ CHECK(wgtVal_) << "should have weight value";
+ if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) {
+ wgtValBwdData_ =
+ MKLDNNMatrix::create(nullptr, dataPD->weights_primitive_desc());
+ cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_);
+ CHECK(cvtWgtVal_);
+ } else {
+ wgtValBwdData_ = wgtVal_;
+ }
+ VLOG(MKLDNN_FMTS) << "weight value format for backward data"
+ << wgtValBwdData_->getFormat();
+}
+
+} // namespace paddle
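
One bookkeeping detail in init() and loadConvSettings() above: Paddle stores
the convolution weight as an (oc_/gp_) x (ic_*fh_*fw_) matrix, while MKL-DNN
expects oihw or, for grouped convolutions, goihw with per-group channel counts.
Both views hold oc*ic*fh*fw/gp elements, so the reorder performed in
convertWeightsFromPaddle() is essentially a relayout of the same data. A quick
standalone check with made-up sizes:

#include <cassert>

int main() {
  const long oc = 32, ic = 8, fh = 3, fw = 3, gp = 4;
  const long paddleSize = (oc / gp) * (ic * fh * fw);           // height * width
  const long goihwSize = gp * (oc / gp) * (ic / gp) * fh * fw;  // grouped dims
  assert(paddleSize == goihwSize);  // both equal oc * ic * fh * fw / gp
  assert(paddleSize == oc * ic * fh * fw / gp);
  return 0;
}
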
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..f84f2f737c47a1b8adc2b83360a0396ffbc6ae24
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNConvLayer.h
@@ -0,0 +1,253 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "MKLDNNLayer.h"
+#include "mkldnn.hpp"
+
+namespace paddle {
+typedef mkldnn::convolution_forward conv_fwd;
+typedef mkldnn::convolution_backward_weights conv_bwdWgt;
+typedef mkldnn::convolution_backward_data conv_bwdData;
+
+/**
+ * @brief A subclass of MKLDNNLayer conv layer.
+ *
+ * The config file api is mkldnn_conv
+ */
+class MKLDNNConvLayer : public MKLDNNLayer {
+protected:
+ // padding height and width
+ int ph_, pw_;
+ // stride height and width
+ int sh_, sw_;
+ // dilation height and width
+ int dh_, dw_;
+  // filter (kernel) height and width
+ int fh_, fw_;
+ // group number
+ int gp_;
+
+ // in resetBwdData, the format of wgtValBwdData_ is different with wgtVal_
+ MKLDNNMatrixPtr wgtValBwdData_;
+ // convert handle from wgtVal_ to wgtValBwdData_
+  std::shared_ptr<mkldnn::reorder> cvtWgtVal_;
+
+ // save forward primitive_desc, which can be used backward
+  std::shared_ptr<conv_fwd::primitive_desc> fwdPD_;
+
+ // MKLDNNMatrixPtr which should be created from CPU Device
+ MKLDNNMatrixPtr cpuInVal_;
+ MKLDNNMatrixPtr cpuInGrad_;
+ MKLDNNMatrixPtr cpuOutVal_;
+ MKLDNNMatrixPtr cpuOutGrad_;
+ // convert handle between CPU device and MKLDNN device
+  std::shared_ptr<mkldnn::reorder> cvtInVal_;
+  std::shared_ptr<mkldnn::reorder> cvtInGrad_;
+  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
+  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
+
+ // whether the weight has been init
+ bool hasInitedWgt_;
+
+ // true by default, which impact the calculation of output image size.
+ // details can refer to mathUtil.h
+ bool caffeMode_;
+
+ // weight and bias
+  std::unique_ptr<Weight> weight_;
+  std::unique_ptr<Weight> biases_;
+
+public:
+ explicit MKLDNNConvLayer(const LayerConfig& config)
+ : MKLDNNLayer(config), hasInitedWgt_(false), caffeMode_(true) {}
+
+ ~MKLDNNConvLayer() {}
+
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
+
+ void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
+
+  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+ void updateInputData() override;
+
+ void updateWeights(const UpdateCallback& callback) override;
+
+ void convertWeightsFromPaddle() override;
+
+ void convertWeightsToPaddle() override;
+
+ void printSizeInfo() override {
+ MKLDNNLayer::printSizeInfo();
+ VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
+ << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
+ << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_;
+ }
+
+ void printValueFormatFlow() override {
+ if (cpuInVal_) {
+ VLOG(MKLDNN_FMTS) << cpuInVal_->getFormat() << " >>>";
+ }
+ MKLDNNLayer::printValueFormatFlow();
+ if (cpuOutVal_) {
+ VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat();
+ }
+ }
+
+ void printGradFormatFlow() override {
+ if (cpuInGrad_) {
+ VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<";
+ }
+ MKLDNNLayer::printGradFormatFlow();
+ if (cpuOutGrad_) {
+ VLOG(MKLDNN_FMTS) << " <<< " << cpuOutGrad_->getFormat();
+ }
+ }
+
+protected:
+ /**
+ * load the dims settings of this conv
+ */
+ void loadConvSettings(mkldnn::memory::dims& wgt,
+ mkldnn::memory::dims& bias,
+ mkldnn::memory::dims& stride,
+ mkldnn::memory::dims& dilation,
+ mkldnn::memory::dims& padL,
+ mkldnn::memory::dims& padR);
+
+ /**
+ * reset the forward primitive descriptor.
+ */
+  void resetFwdPD(std::shared_ptr<conv_fwd::primitive_desc>& pd);
+ /**
+ * reset the MKLDNNMatrix buffers used in forward.
+ */
+  void resetFwdBuffers(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset the forward pipeline.
+ */
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset MKLDNNMatrix of input value
+ */
+  void resetInValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in);
+ /**
+ * reset MKLDNNMatrix of weight and bias value
+ */
+  void resetWgtBiasValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias);
+ /**
+ * reset MKLDNNMatrix of output value
+ */
+  void resetOutValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset the backward weight primitive descriptor.
+ */
+  void resetBwdWgtPD(std::shared_ptr<conv_bwdWgt::primitive_desc>& pd);
+ /**
+ * reset the backward data primitive descriptor.
+ */
+  void resetBwdDataPD(std::shared_ptr<conv_bwdData::primitive_desc>& pd);
+ /**
+ * reset the MKLDNNMatrix buffers used in backward.
+ */
+  void resetBwdBuffers(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+                       std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset the backward pipeline.
+ */
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+                        std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset MKLDNNMatrix of output grad
+ */
+  void resetOutGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset MKLDNNMatrix of weight and bias grad
+ */
+  void resetWgtBiasGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias);
+ /**
+ * reset MKLDNNMatrix of input grad
+ */
+  void resetInGrad(std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in);
+ /**
+ * reset MKLDNNMatrix of weight value for backward data
+ * since the primitive_desc would be different with wgtVal_
+ */
+  void resetWgtValBwdData(std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& wgt);
+
+ /**
+ * get padding_r according to
+ * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
+ * test_convolution_forward_common.hpp
+ * @note: mkldnn dilation start from 0 while paddle start from 1
+ */
+ mkldnn::memory::dims getPaddingR() const {
+ mkldnn::memory::dims padR = {ph_, pw_};
+ for (int i = 0; i < 2; ++i) {
+ if ((ih_ - ((fh_ - 1) * dh_ + 1) + ph_ + padR[0]) / sh_ + 1 != oh_) {
+ ++padR[0];
+ }
+ if ((iw_ - ((fw_ - 1) * dw_ + 1) + pw_ + padR[1]) / sw_ + 1 != ow_) {
+ ++padR[1];
+ }
+ }
+ return padR;
+ }
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
index 8318c8c519a4cec1610eadd28320ee5ce0b4147d..f70343251ad4fbb99f9614618f6d1bff1174f15e 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -14,7 +14,6 @@ limitations under the License. */
#include "MKLDNNFcLayer.h"
#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
using namespace mkldnn; // NOLINT
typedef memory::format format;
@@ -40,6 +39,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
oc_ = getSize();
oh_ = 1;
ow_ = 1;
+ ih_ = 1;
+ iw_ = 1;
// input size can not change in FC
iLayerSize_ = inputLayers_[0]->getSize();
@@ -77,111 +78,86 @@ void MKLDNNFcLayer::convertWeightsToPaddle() {
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}
-void MKLDNNFcLayer::convertOutputToOtherDevice() {
- copyOutputInfoToOtherDevice();
- // find other cpu device and reorder output to cpu device
- int cnt = 0;
- for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
- if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
- // fc cpu output value do not need convert
- // just share point
- outputOtherDevice_[i].value = output_.value;
- ++cnt;
- }
- }
-
- if (cnt > 1) {
- LOG(WARNING) << "should not have more than one CPU devie";
- }
-}
+void MKLDNNFcLayer::reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
+ reshapeInput(bs, ih, iw);
-void MKLDNNFcLayer::reshape() {
- const Argument& input = getInput(0, getPrev(0)->getDeviceId());
- int batchSize = input.getBatchSize();
- if (bs_ == batchSize) {
- return;
- }
- bs_ = batchSize;
- ih_ = input.getFrameHeight();
- iw_ = input.getFrameWidth();
- if (ih_ == 0) {
- ih_ = 1;
- }
- if (iw_ == 0) {
- iw_ = 1;
- }
CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize());
- ic_ = iLayerSize_ / (ih_ * iw_);
- CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible";
- CHECK_EQ(size_t(oc_), getSize());
- printSizeInfo();
+ ic = iLayerSize_ / (ih * iw);
+ CHECK_EQ(size_t(ic * ih * iw), iLayerSize_) << "not divisible";
+ CHECK_EQ(size_t(oc), getSize());
- // reset output
- output_.setFrameHeight(oh_);
- output_.setFrameWidth(ow_);
- resetOutput(bs_, oc_);
+ reshapeOutput(oh, ow);
+ resizeOutput(bs, oc);
- // reset mkldnn forward
- resetFwd();
- needResetBwd_ = true;
-
- convertWeightsFromPaddle();
+ printSizeInfo();
}
-void MKLDNNFcLayer::resetFwd() {
+void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
bool hasBias = biases_ && biases_->getW();
- const MatrixPtr& wgt = weight_->getW();
- const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr;
- const MatrixPtr& out = output_.value;
+ const MatrixPtr& wgtVal = weight_->getW();
+ const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
+ const MatrixPtr& outVal = output_.value;
if (inputIsOnlyMKLDNN()) {
- const MatrixPtr& in = getInputValue(0);
- inVal_ = std::dynamic_pointer_cast(in);
- CHECK(inVal_) << "Input should be MKLDNNMatrix";
+ const MatrixPtr& inVal = getInputValue(0);
+ in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
+ CHECK(in) << "Input should be MKLDNNMatrix";
} else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
- const MatrixPtr& in = getInputValue(0, CPU_DEVICE);
- inVal_ = MKLDNNMatrix::create(
- in, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
- }
- inVal_->downSpatial();
- wgtVal_ = MKLDNNMatrix::create(
- wgt, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
- wgtVal_->downSpatial();
- biasVal_ =
- hasBias ? MKLDNNMatrix::create(bias, {oc_}, format::x, engine_) : nullptr;
- outVal_ = MKLDNNMatrix::create(out, {bs_, oc_}, format::nc, engine_);
+ const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
+ in = MKLDNNMatrix::create(
+ inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
+ }
+ in->downSpatial();
+ wgt = MKLDNNMatrix::create(
+ wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
+ wgt->downSpatial();
+ bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
+ : nullptr;
+ out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);
// change original output value to mkldnn output value
- output_.value = std::dynamic_pointer_cast(outVal_);
+ output_.value = std::dynamic_pointer_cast<Matrix>(out);
if (!outputIsOnlyMKLDNN()) {
- convertOutputToOtherDevice();
+ // the fc cpu output value does not need a conversion primitive
+ // just share the data pointer
+ getOutput(CPU_DEVICE).value->setData(output_.value->getData());
}
// create forward handle
prop_kind pk = prop_kind::forward;
fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
- inVal_->getMemoryDesc(),
- wgtVal_->getMemoryDesc(),
- biasVal_->getMemoryDesc(),
- outVal_->getMemoryDesc())
+ in->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ bias->getMemoryDesc(),
+ out->getMemoryDesc())
: fc_fwd::desc(pk,
- inVal_->getMemoryDesc(),
- wgtVal_->getMemoryDesc(),
- outVal_->getMemoryDesc());
+ in->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
if (hasBias) {
- fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_));
+ fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
} else {
- fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_));
+ fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
}
printValueFormatFlow();
- pipelineFwd_.clear();
- pipelineFwd_.push_back(*fwd_);
+ pipeline.push_back(*fwd_);
}
-void MKLDNNFcLayer::resetBwd() {
+void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
if (!needResetBwd_) {
return;
}
@@ -190,8 +166,8 @@ void MKLDNNFcLayer::resetBwd() {
/// backward weight
CHECK(inVal_) << "Should have input value";
- const MatrixPtr& wgt = weight_->getWGrad();
- const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr;
+ const MatrixPtr& wgtGrad = weight_->getWGrad();
+ const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
// TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
@@ -202,101 +178,66 @@ void MKLDNNFcLayer::resetBwd() {
// for CPU device:
// fc does not need to convert from the cpu device since its output is always in nc format
// only need create from cpu device
- const MatrixPtr& out = getOutput(device).grad;
- outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
- wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc());
- biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc())
- : nullptr;
+ const MatrixPtr& outGrad = getOutput(device).grad;
+ out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
+ wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
+ bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
+ : nullptr;
// create memory primitive desc
fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
+ wgt->getMemoryDesc(),
+ out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
fc_bwdWgt::desc bwdWgtDesc = hasBias
? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- biasGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc())
+ wgt->getMemoryDesc(),
+ bias->getMemoryDesc(),
+ out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
+ wgt->getMemoryDesc(),
+ out->getMemoryDesc());
fc_bwdWgt::primitive_desc bwdWgtPD =
fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
if (hasBias) {
- bwdWgt_.reset(
- new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_));
+ bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
} else {
- bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_));
+ bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
}
- pipelineBwd_.clear();
- pipelineBwd_.push_back(*bwdWgt_);
+ pipeline.push_back(*bwdWgt_);
/// backward data
- device = inputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
- const MatrixPtr& in = getInputGrad(0, device);
- if (in == nullptr) {
+ const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
+ if (inGrad == nullptr) {
return;
}
- if (getInput(0, device).getAllCount() > 1) {
- // TODO(TJ): use outputMaps_ ways when merge outgrad done
+ if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
+ // TODO(TJ): use outputMaps_ to get inGrad_ once merging outgrad is done
} else {
- inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc());
+ in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
- fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
+ fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
+ inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc());
fc_bwdData::primitive_desc bwdDataPD =
fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
CHECK(wgtVal_) << "Should have weight memory";
- bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_));
+ bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
printGradFormatFlow();
- pipelineBwd_.push_back(*bwdData_);
+ pipeline.push_back(*bwdData_);
}
-void MKLDNNFcLayer::forward(PassType passType) {
- Layer::forward(passType);
- reshape();
-
- {
- REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
- syncInputValue();
-
- // just submit forward pipeline
- stream_->submit(pipelineFwd_);
- }
-
- /* activation */ {
- REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
- forwardActivation();
- }
+void MKLDNNFcLayer::updateInputData() {
+ inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}
-void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
- /* Do derivation */ {
- REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
- backwardActivation();
- }
-
- {
- REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
- resetBwd();
-
- syncOutputGrad();
- // just sumbmit backward pipeline
- stream_->submit(pipelineBwd_);
- }
-
- {
- REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
- weight_->getParameterPtr()->incUpdate(callback);
- if (biases_ && biases_->getWGrad()) {
- biases_->getParameterPtr()->incUpdate(callback);
- }
+void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
+ weight_->getParameterPtr()->incUpdate(callback);
+ if (biases_ && biases_->getWGrad()) {
+ biases_->getParameterPtr()->incUpdate(callback);
}
}
} // namespace paddle
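
For reference, the fc_fwd primitive set up in resetFwd() above computes a standard fully-connected transform. The naive loop below restates that contract with plain std::vector buffers and illustrative names; it is a reading aid, not Paddle's implementation.

    #include <cstddef>
    #include <vector>

    // out[b][o] = sum_i in[b][i] * wgt[o][i] + bias[o]
    // in is bs x ic, wgt is oc x ic, out is bs x oc, all row major.
    void fcForwardRef(const std::vector<float>& in,
                      const std::vector<float>& wgt,
                      const std::vector<float>& bias,  // may be empty
                      std::vector<float>& out,
                      size_t bs, size_t ic, size_t oc) {
      out.assign(bs * oc, 0.f);
      for (size_t b = 0; b < bs; ++b) {
        for (size_t o = 0; o < oc; ++o) {
          float sum = bias.empty() ? 0.f : bias[o];
          for (size_t i = 0; i < ic; ++i) {
            sum += in[b * ic + i] * wgt[o * ic + i];
          }
          out[b * oc + o] = sum;
        }
      }
    }

    int main() {
      std::vector<float> in = {1.f, 2.f};             // bs=1, ic=2
      std::vector<float> wgt = {3.f, 4.f, 5.f, 6.f};  // oc=2, ic=2
      std::vector<float> bias = {0.5f, -0.5f};
      std::vector<float> out;
      fcForwardRef(in, wgt, bias, out, 1, 2, 2);
      // out = {11.5, 16.5}
      return 0;
    }
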
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h
index e138a6faf181c412949218458e7ecf800a0d6a07..3119f863496df092da13c08bf733f13c42e53780 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.h
+++ b/paddle/gserver/layers/MKLDNNFcLayer.h
@@ -45,35 +45,28 @@ public:
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
- void convertWeightsFromPaddle() override;
+ void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
- void convertWeightsToPaddle() override;
+ void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
- void forward(PassType passType) override;
+ void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
- void backward(const UpdateCallback& callback) override;
+ void updateInputData() override;
-protected:
- /**
- * reshape the input image sizes
- * and reset output buffer size
- * and reset mkldnn forward
- */
- void reshape();
-
- /**
- * reset the forward primitve and memory
- * only would be called when input size changes
- */
- void resetFwd();
-
- /**
- * reset the backward primitve and memory for mkldnn fc
- * only would be called when needed
- */
- void resetBwd();
-
- void convertOutputToOtherDevice() override;
+ void updateWeights(const UpdateCallback& callback) override;
+
+ void convertWeightsFromPaddle() override;
+
+ void convertWeightsToPaddle() override;
};
} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h
index b983b833d510b823c5d4cff0b9390173e4cefc89..169679c8297542cac4a43f5a8e1af311ad9282df 100644
--- a/paddle/gserver/layers/MKLDNNLayer.h
+++ b/paddle/gserver/layers/MKLDNNLayer.h
@@ -19,6 +19,7 @@ limitations under the License. */
#include "MKLDNNBase.h"
#include "mkldnn.hpp"
#include "paddle/math/MKLDNNMatrix.h"
+#include "paddle/utils/Stat.h"
DECLARE_bool(use_mkldnn);
@@ -33,6 +34,8 @@ typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
*/
class MKLDNNLayer : public Layer {
protected:
+ // input value element count
+ size_t inputElemenCnt_;
// batch size
int bs_;
// input image channel, height and width
@@ -52,7 +55,7 @@ protected:
std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_;
- // MKLDNNMatrixPtr
+ // MKLDNNMatrixPtr with internal format
MKLDNNMatrixPtr inVal_;
MKLDNNMatrixPtr inGrad_;
MKLDNNMatrixPtr outVal_;
@@ -65,6 +68,7 @@ protected:
public:
explicit MKLDNNLayer(const LayerConfig& config)
: Layer(config),
+ inputElemenCnt_(0),
bs_(0),
ic_(0),
ih_(0),
@@ -95,12 +99,104 @@ public:
if (!Layer::init(layerMap, parameterMap)) {
return false;
}
+ checkCPUOutputsNumber();
stream_.reset(new MKLDNNStream());
engine_ = CPUEngine::Instance().getEngine();
return true;
}
+ void forward(PassType passType) override {
+ passType_ = passType;
+
+ {
+ REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
+ CHECK(!inputLayers_.empty());
+ copySeqInfoToOutputs();
+ size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt();
+ if (inputElemenCnt_ != elemenCnt) {
+ // reset when the total input size changes, not only the batch size
+ inputElemenCnt_ = elemenCnt;
+ reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
+ resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
+ convertWeightsFromPaddle();
+ needResetBwd_ = true;
+ }
+
+ if (inputLayers_[0]->getType() == "data") {
+ updateInputData();
+ }
+
+ stream_->submit(pipelineFwd_);
+ }
+
+ /* activation */ {
+ REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
+ forwardActivation();
+ }
+ }
+
+ void backward(const UpdateCallback& callback) override {
+ /* Do derivation */ {
+ REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
+ backwardActivation();
+ }
+
+ {
+ REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
+ if (needResetBwd_) {
+ resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
+ needResetBwd_ = false;
+ }
+
+ stream_->submit(pipelineBwd_);
+ }
+
+ {
+ REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
+ updateWeights(callback);
+ }
+ }
+
+ /**
+ * reshape the input image sizes
+ * and reset the output image and buffer size;
+ * the output channel cannot be changed
+ */
+ virtual void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0;
+
+ /**
+ * reset the mkldnn forward primitive and memory;
+ * only called when the input size changes
+ */
+ virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) = 0;
+
+ /**
+ * reset the mkldnn backward primitive and memory;
+ * only called when needed
+ */
+ virtual void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) = 0;
+
+ /**
+ * Update the input value data when the input layer is of "data" type,
+ * since the input data address might change.
+ */
+ virtual void updateInputData() {}
+
+ /**
+ * Update weights and biases if necessary.
+ */
+ virtual void updateWeights(const UpdateCallback& callback) {}
+
/**
* convert weight from paddle format to mkldnn format
* weight_ will be override
@@ -114,10 +210,38 @@ public:
virtual void convertWeightsToPaddle() {}
/**
- * convert MKLDNN output to other device.
- * only support CPU device yet
+ * add this interface as public for unit test
+ */
+ void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); }
+
+protected:
+ /**
+ * reshape the input image sizes and input batchsize
*/
- virtual void convertOutputToOtherDevice() {}
+ virtual void reshapeInput(int& batchsize, int& height, int& width) {
+ const Argument& input = inputLayers_[0]->getOutput();
+ batchsize = input.getBatchSize();
+ int h = input.getFrameHeight();
+ int w = input.getFrameWidth();
+ if (h != 0) {
+ height = h;
+ }
+ if (w != 0) {
+ width = w;
+ }
+ }
+
+ /**
+ * reshape output image sizes
+ */
+ virtual void reshapeOutput(size_t height, size_t width) {
+ output_.setFrameHeight(height);
+ output_.setFrameWidth(width);
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ outputOtherDevice_[i].setFrameHeight(height);
+ outputOtherDevice_[i].setFrameWidth(width);
+ }
+ }
/**
* print info about sizes
@@ -133,8 +257,8 @@ public:
*/
virtual void printValueFormatFlow() {
if (inVal_ && outVal_) {
- VLOG(MKLDNN_FMTS) << "value format flow --- " << inVal_->getFormat()
- << " >>> " << outVal_->getFormat();
+ VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> "
+ << outVal_->getFormat();
}
}
@@ -143,29 +267,12 @@ public:
*/
virtual void printGradFormatFlow() {
if (inGrad_ && outGrad_) {
- VLOG(MKLDNN_FMTS) << "grad format flow --- " << inGrad_->getFormat()
- << " <<< " << outGrad_->getFormat();
+ VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< "
+ << outGrad_->getFormat();
}
}
protected:
- /**
- * copy image size and sequence info to other device
- * @note: can not directly use Layer::copyOutputToOtherDevice since here only
- * copy base info and do not copy data value
- */
- void copyOutputInfoToOtherDevice() {
- for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
- outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight());
- outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth());
- outputOtherDevice_[i].sequenceStartPositions =
- output_.sequenceStartPositions;
- outputOtherDevice_[i].subSequenceStartPositions =
- output_.subSequenceStartPositions;
- outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
- }
- }
-
/**
* If input only has MKLDNN device.
* Otherwise, only support the previous layer using CPU device.
@@ -193,37 +300,12 @@ protected:
return outputOtherDevice_.size() == 0;
}
- /**
- * Sync input value data
- */
- void syncInputValue() {
- if (inputIsOnlyMKLDNN()) {
- return;
- }
- real* iData = getInputValue(0, CPU_DEVICE)->getData();
- // update input data
- // since it might be changed if this is after data layer
- inVal_->updateData(iData);
- }
-
- /**
- * Sync output grad data
- */
- void syncOutputGrad() {
- if (outputIsOnlyMKLDNN()) {
- return;
- }
-
- // update diff
- real* oDiff = getOutput(CPU_DEVICE).grad->getData();
- outGrad_->updateData(oDiff);
- }
-
/**
* Set deviceId of this layer.
*/
void setDevice(int id) { deviceId_ = id; }
+private:
/**
* Set deviceId of the params used in this layer.
*/
@@ -247,6 +329,42 @@ protected:
parameter->setDevice(id);
}
}
+
+ /**
+ * Check the number of CPU device outputs in outputOtherDevice_;
+ * there should be at most one.
+ */
+ void checkCPUOutputsNumber(int max = 1) {
+ int cnt = 0;
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
+ ++cnt;
+ }
+ }
+ CHECK_LE(cnt, max) << "too many CPU devices";
+ }
+
+ /**
+ * copy SeqInfo from input layer to this output and other output devices.
+ * @note: do not use getInput(0) since it uses this deviceId_,
+ * use "inputLayers_[0]->getOutput()" instead.
+ */
+ void copySeqInfoToOutputs() {
+ if (inputLayers_.empty() || !needSequenceInfo_) {
+ return;
+ }
+ const Argument& input = inputLayers_[0]->getOutput();
+ output_.sequenceStartPositions = input.sequenceStartPositions;
+ output_.subSequenceStartPositions = input.subSequenceStartPositions;
+ output_.cpuSequenceDims = input.cpuSequenceDims;
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ outputOtherDevice_[i].sequenceStartPositions =
+ output_.sequenceStartPositions;
+ outputOtherDevice_[i].subSequenceStartPositions =
+ output_.subSequenceStartPositions;
+ outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
+ }
+ }
};
} // namespace paddle
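
The forward()/backward() bodies moved into MKLDNNLayer above cache the mkldnn primitives and rebuild them only when the total input element count changes. A minimal standalone sketch of that caching pattern (hypothetical class and method names, no mkldnn dependency):

    #include <cstddef>

    class CachedPipeline {
    public:
      void forward(size_t inputElemCnt) {
        if (cachedElemCnt_ != inputElemCnt) {
          cachedElemCnt_ = inputElemCnt;
          rebuildForward();         // reshape, resetFwd, convert weights
          backwardIsStale_ = true;  // defer resetBwd until backward()
        }
        submitForward();
      }

      void backward() {
        if (backwardIsStale_) {
          rebuildBackward();
          backwardIsStale_ = false;
        }
        submitBackward();
      }

    private:
      size_t cachedElemCnt_ = 0;
      bool backwardIsStale_ = true;
      void rebuildForward() {}
      void rebuildBackward() {}
      void submitForward() {}
      void submitBackward() {}
    };

    int main() {
      CachedPipeline layer;
      layer.forward(128 * 784);  // first pass builds the forward primitives
      layer.backward();          // first backward builds the backward primitives
      layer.forward(128 * 784);  // same size, primitives are reused
      return 0;
    }
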
diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e97809141a93106f9e6ebaf40c7e8aa9c6010557
--- /dev/null
+++ b/paddle/gserver/layers/SwitchOrderLayer.cpp
@@ -0,0 +1,109 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "SwitchOrderLayer.h"
+#include "paddle/utils/Stat.h"
+
+namespace paddle {
+
+REGISTER_LAYER(switch_order, SwitchOrderLayer);
+
+bool SwitchOrderLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ /* Initialize the basic parent class */
+ Layer::init(layerMap, parameterMap);
+ auto& img_conf = config_.inputs(0).image_conf();
+ size_t inD = img_conf.img_size_z();
+ size_t inH =
+ img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size();
+ size_t inW = img_conf.img_size();
+ size_t inC = img_conf.channels();
+ inH = inH * inD;
+ inDims_ = TensorShape({0, inC, inH, inW});
+ outDims_ = TensorShape(4);
+
+ auto& reshape_conf = config_.reshape_conf();
+ for (int i = 0; i < reshape_conf.height_axis_size(); i++) {
+ heightAxis_.push_back(reshape_conf.height_axis(i));
+ }
+ for (int i = 0; i < reshape_conf.width_axis_size(); i++) {
+ widthAxis_.push_back(reshape_conf.width_axis(i));
+ }
+ createFunction(nchw2nhwc_, "NCHW2NHWC", FuncConfig());
+ createFunction(nhwc2nchw_, "NHWC2NCHW", FuncConfig());
+ return true;
+}
+
+void SwitchOrderLayer::setOutDims() {
+ outDims_.setDim(0, inDims_[0]);
+ outDims_.setDim(1, inDims_[2]);
+ outDims_.setDim(2, inDims_[3]);
+ outDims_.setDim(3, inDims_[1]);
+ reshapeHeight_ = 1;
+ for (size_t i = 0; i < heightAxis_.size(); i++) {
+ reshapeHeight_ *= outDims_[heightAxis_[i]];
+ }
+ output_.setFrameHeight(reshapeHeight_);
+ reshapeWidth_ = 1;
+ for (size_t i = 0; i < widthAxis_.size(); i++) {
+ reshapeWidth_ *= outDims_[widthAxis_[i]];
+ }
+ output_.setFrameWidth(reshapeWidth_);
+}
+
+void SwitchOrderLayer::setInDims() {
+ MatrixPtr input = inputLayers_[0]->getOutputValue();
+ size_t batchSize = input->getHeight();
+ inDims_.setDim(0, batchSize);
+ int d = inputLayers_[0]->getOutput().getFrameDepth();
+ d = (d == 0 ? 1 : d);
+ int h = inputLayers_[0]->getOutput().getFrameHeight();
+ if (h != 0) inDims_.setDim(2, h * d);
+ int w = inputLayers_[0]->getOutput().getFrameWidth();
+ if (w != 0) inDims_.setDim(3, w);
+ int totalCount = input->getElementCnt();
+ int channels = totalCount / (inDims_[0] * inDims_[2] * inDims_[3]);
+ if (channels != 0) inDims_.setDim(1, channels);
+}
+
+void SwitchOrderLayer::forward(PassType passType) {
+ Layer::forward(passType);
+ setInDims();
+ setOutDims();
+ resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
+ if (heightAxis_.size() > 0) {
+ resetOutput(reshapeHeight_, reshapeWidth_);
+ }
+
+ // switch NCHW to NHWC
+ BufferArgs inputs;
+ BufferArgs outputs;
+ inputs.addArg(*getInputValue(0), inDims_);
+ outputs.addArg(*getOutputValue(), outDims_);
+ nchw2nhwc_[0]->calc(inputs, outputs);
+ forwardActivation();
+}
+
+void SwitchOrderLayer::backward(const UpdateCallback& callback) {
+ (void)callback;
+ backwardActivation();
+
+ // switch NHWC to NCHW
+ BufferArgs inputs;
+ BufferArgs outputs;
+ inputs.addArg(*getOutputGrad(), outDims_);
+ outputs.addArg(*getInputGrad(0), inDims_, ADD_TO);
+ nhwc2nchw_[0]->calc(inputs, outputs);
+}
+} // namespace paddle
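
To make the axis bookkeeping in setOutDims() concrete: the output dims are ordered {N, H, W, C} after the switch, and the output matrix is resized to (product of the height axes) x (product of the width axes). A small illustrative check, using the same 4-channel 16x16 configuration as the unit test further below (the batch size here is arbitrary):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<size_t> outDims = {2, 16, 16, 4};  // N, H, W, C
      std::vector<int> heightAxis = {0, 1, 2};       // reshape_conf.height_axis
      std::vector<int> widthAxis = {3};              // reshape_conf.width_axis

      size_t reshapeHeight = 1;
      for (int a : heightAxis) reshapeHeight *= outDims[a];
      size_t reshapeWidth = 1;
      for (int a : widthAxis) reshapeWidth *= outDims[a];

      assert(reshapeHeight == 2 * 16 * 16);  // output matrix rows
      assert(reshapeWidth == 4);             // output matrix columns
      return 0;
    }
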
diff --git a/paddle/gserver/layers/SwitchOrderLayer.h b/paddle/gserver/layers/SwitchOrderLayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..47b1f7f73ee783b3eae3c9cfe08b1459cef16a71
--- /dev/null
+++ b/paddle/gserver/layers/SwitchOrderLayer.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Layer.h"
+
+namespace paddle {
+
+/**
+ * \brief This layer switches the image data layout from NCHW to NHWC in the
+ * forward pass (and back in backward), reshaping the output by the configured axes.
+ */
+class SwitchOrderLayer : public Layer {
+public:
+ explicit SwitchOrderLayer(const LayerConfig& config) : Layer(config) {}
+
+ ~SwitchOrderLayer() {}
+
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
+ void forward(PassType passType) override;
+ void backward(const UpdateCallback& callback = nullptr) override;
+ void setInDims();
+ void setOutDims();
+
+protected:
+ std::vector<std::shared_ptr<FunctionBase>> nchw2nhwc_;
+ std::vector<std::shared_ptr<FunctionBase>> nhwc2nchw_;
+ TensorShape inDims_;
+ TensorShape outDims_;
+ std::vector<int> heightAxis_;
+ std::vector<int> widthAxis_;
+ size_t reshapeHeight_;
+ size_t reshapeWidth_;
+};
+} // namespace paddle
diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
index de1635be2af37cd0ba49010199a417090865b0e4..2f48e5b2d3ffc9337ed1314f6db6549e56263fdd 100644
--- a/paddle/gserver/tests/MKLDNNTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn,
initTestLayer(
configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
}
- dnnLayer_ = testLayers_[DNN];
refLayer_ = testLayers_[REF];
+ dnnLayer_ = std::dynamic_pointer_cast<MKLDNNLayer>(testLayers_[DNN]);
+ CHECK(dnnLayer_);
+ // to compare against the Paddle reference results,
+ // we need to manually add a CPU device output for the test
+ dnnLayer_->addOutputArgument(CPU_DEVICE);
EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
@@ -109,20 +113,22 @@ void MKLDNNTester::randomBotDatas() {
void MKLDNNTester::randomTopDiffs() {
refLayer_->getOutputGrad()->randomizeUniform();
- dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
- VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
+ dnnLayer_->getOutput(CPU_DEVICE)
+ .grad->copyFrom(*(refLayer_->getOutputGrad()));
+ VLOG(lvl_) << "Random Backward Input, TopDiff: ";
printMatrix(refLayer_->getOutputGrad());
}
void MKLDNNTester::checkForward() {
- printTopDatas();
- double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
- testLayers_[REF]->getOutputValue());
VLOG(MKLDNN_ALL) << "Check Forward";
+ printTopDatas();
+ double delta = compareMatrix(dnnLayer_->getOutput(-1).value,
+ refLayer_->getOutputValue());
EXPECT_LE(fabs(delta), eps_);
}
void MKLDNNTester::checkBackwardData() {
+ VLOG(MKLDNN_ALL) << "Check Backward Data";
// TODO(TJ): uncomment me when batch norm ready
// const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
@@ -144,14 +150,12 @@ void MKLDNNTester::checkBackwardData() {
}
void MKLDNNTester::checkBackwardWgts() {
+ VLOG(MKLDNN_ALL) << "Check Backward Weight";
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
vector<VectorPtr> dnnWgts; // used to temporarily save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts);
- const MKLDNNLayerPtr dnnlayer =
- std::dynamic_pointer_cast(dnnLayer_);
- CHECK(dnnlayer);
- dnnlayer->convertWeightsToPaddle();
+ dnnLayer_->convertWeightsToPaddle();
for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
@@ -189,38 +193,38 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
}
// clear parameters grad
-void MKLDNNTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs(size_t id) {
+ CHECK_LE(id, parameters_.size());
for (size_t n = 0; n < parameters_.size(); ++n) {
- for (size_t i = 0; i < parameters_[n].size(); ++i) {
- const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
- if (grad) {
- grad->zeroMem();
+ if (id == n || id == parameters_.size()) {
+ for (size_t i = 0; i < parameters_[n].size(); ++i) {
+ const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
+ if (grad) {
+ grad->zeroMem();
+ }
}
}
}
}
-void MKLDNNTester::clearBotDiffs() {
- // dnn and ref
+void MKLDNNTester::clearBotDiffs(size_t id) {
+ CHECK_LE(id, dataLayers_.size());
for (size_t n = 0; n < dataLayers_.size(); ++n) {
- // all inputs layers
- for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
- dataLayers_[n][i]->getOutputGrad()->zeroMem();
+ if (id == n || id == dataLayers_.size()) {
+ // clear the input layers of this specific layer
+ for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
+ dataLayers_[n][i]->getOutputGrad()->zeroMem();
+ }
}
}
}
-void MKLDNNTester::clearBotDiffs(int n) {
- CHECK_LT(n, NUM);
- // all inputs layers
- for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
- dataLayers_[n][i]->getOutputGrad()->zeroMem();
- }
-}
-
-void MKLDNNTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas(size_t id) {
+ CHECK_LE(id, testLayers_.size());
for (size_t i = 0; i < testLayers_.size(); ++i) {
- testLayers_[i]->getOutputValue()->zeroMem();
+ if (id == i || id == testLayers_.size()) {
+ testLayers_[i]->getOutputValue()->zeroMem();
+ }
}
}
@@ -300,16 +304,24 @@ void MKLDNNTester::runOnce() {
checkForward();
// test backward
+ // simple updater
+ UpdateCallback updateCallback = [](Parameter* para) {
+ auto& grad = para->getBuf(PARAMETER_GRADIENT);
+ auto& value = para->getBuf(PARAMETER_VALUE);
+ real lr = 1e-3;
+ value->add(*grad, lr);
+ };
randomTopDiffs();
- dnnLayer_->backward(nullptr);
- refLayer_->backward(nullptr);
+ dnnLayer_->backward(updateCallback);
+ refLayer_->backward(updateCallback);
checkBackwardData();
checkBackwardWgts();
// clear buffers
// ref code will add to the diff, dnn code will write to it
- // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers
+ // and clearTopDatas(REF) should be covered by the ref layers
clearBotDiffs(REF);
+ clearWgtDiffs(REF);
}
void MKLDNNTester::run(const TestConfig& dnn,
diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h
index e55e4493ffdfe45b8cfdee423febd1878b8b3d8a..5ac885638cde7693a0c847733e7a6149c1b7e6c2 100644
--- a/paddle/gserver/tests/MKLDNNTester.h
+++ b/paddle/gserver/tests/MKLDNNTester.h
@@ -18,6 +18,7 @@ limitations under the License. */
#include
#include "LayerGradUtil.h"
#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
namespace paddle {
@@ -40,7 +41,8 @@ protected:
vector<LayerMap> layerMaps_;
vector<vector<ParameterPtr>> parameters_;
vector<LayerPtr> testLayers_;
- LayerPtr dnnLayer_, refLayer_;
+ LayerPtr refLayer_;
+ MKLDNNLayerPtr dnnLayer_;
/// run some iterations, all the result should pass
size_t iter_;
@@ -88,10 +90,10 @@ private:
void checkBackwardData();
void checkBackwardWgts();
- void clearWgtDiffs();
- void clearBotDiffs();
- void clearBotDiffs(int n); // clear specific layer
- void clearTopDatas();
+ // clear a specific layer's buffers; clear all when id equals NUM
+ void clearWgtDiffs(size_t id = NUM);
+ void clearBotDiffs(size_t id = NUM);
+ void clearTopDatas(size_t id = NUM);
void printTopDatas();
void printMatrix(const MatrixPtr& m);
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index a831ffbc73fbd6ad42fa31b2d6d583718474e59b..090bde7b203652e3ffb1662b8f5b8937885d2608 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -1703,6 +1703,55 @@ TEST(Layer, BatchNormalizationLayer) {
#endif
}
+void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) {
+ TestConfig config;
+ const int CHANNELS = 10;
+ const int IMG_SIZE = 16;
+ const int IMG_SIZE_Y = 8;
+ const int IMG_SIZE_Z = 8;
+ size_t size = CHANNELS * IMG_SIZE * IMG_SIZE_Y * IMG_SIZE_Z;
+ config.layerConfig.set_type(type);
+ config.layerConfig.set_size(size);
+ config.layerConfig.set_active_type("sigmoid");
+ config.biasSize = CHANNELS;
+ config.inputDefs.push_back({INPUT_DATA,
+ "layer_0",
+ /* dim= */ size,
+ /* paraSize= */ CHANNELS});
+
+ config.inputDefs.push_back({INPUT_DATA, "layer_1_running_mean", 1, CHANNELS});
+ config.inputDefs.back().isStatic = true;
+ config.inputDefs.push_back({INPUT_DATA, "layer_2_running_var", 1, CHANNELS});
+ config.inputDefs.back().isStatic = true;
+
+ LayerInputConfig* input = config.layerConfig.add_inputs();
+ config.layerConfig.add_inputs();
+ config.layerConfig.add_inputs();
+
+ ImageConfig* img_conf = input->mutable_image_conf();
+ img_conf->set_channels(CHANNELS);
+ img_conf->set_img_size(IMG_SIZE);
+ img_conf->set_img_size_y(IMG_SIZE_Y);
+ img_conf->set_img_size_z(IMG_SIZE_Z);
+
+ testLayerGrad(config,
+ "batch_norm",
+ 64,
+ /* trans= */ trans,
+ useGpu,
+ /* useWeight */ true);
+}
+
+TEST(Layer, testBatchNorm3DLayer) {
+ testBatchNorm3DLayer("batch_norm", false, false);
+#ifndef PADDLE_ONLY_CPU
+ testBatchNorm3DLayer("batch_norm", false, true);
+ if (hl_get_cudnn_lib_version() >= int(4000)) {
+ testBatchNorm3DLayer("cudnn_batch_norm", false, true);
+ }
+#endif
+}
+
void testConvOperator(bool isDeconv) {
TestConfig config;
const int NUM_FILTERS = 16;
@@ -2008,6 +2057,31 @@ TEST(Layer, CropLayer) {
}
}
+TEST(Layer, SwitchOrderLayer) {
+ TestConfig config;
+ // config input_0
+ config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 0});
+ LayerInputConfig* input = config.layerConfig.add_inputs();
+ ImageConfig* img = input->mutable_image_conf();
+ img->set_channels(4);
+ img->set_img_size(16);
+ img->set_img_size_y(16);
+
+ ReshapeConfig* reshape = config.layerConfig.mutable_reshape_conf();
+ reshape->add_height_axis(0);
+ reshape->add_height_axis(1);
+ reshape->add_height_axis(2);
+ reshape->add_width_axis(3);
+
+ // config the switch_order layer
+ config.layerConfig.set_type("switch_order");
+ config.layerConfig.set_name("switchOrderLayer");
+
+ for (auto useGpu : {false, true}) {
+ testLayerGrad(config, "switch_order", 100, false, useGpu, true);
+ }
+}
+
vector<real> randSampling(real range, int n) {
CHECK_GE(range, n);
vector<real> num(range);
@@ -2228,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) {
conv->set_stride(2);
conv->set_stride_y(2);
conv->set_stride_z(2);
- conv->set_img_size(IMAGE_SIZE);
- conv->set_img_size_y(IMAGE_SIZE_Y);
- conv->set_img_size_z(IMAGE_SIZE_Z);
- conv->set_output_x(imageSize(conv->img_size(),
+ conv->set_output_x(IMAGE_SIZE);
+ conv->set_output_y(IMAGE_SIZE_Y);
+ conv->set_output_z(IMAGE_SIZE_Z);
+
+ conv->set_img_size(imageSize(conv->output_x(),
conv->filter_size(),
conv->padding(),
conv->stride(),
true));
- conv->set_output_y(imageSize(conv->img_size_y(),
- conv->filter_size_y(),
- conv->padding_y(),
- conv->stride_y(),
- true));
- conv->set_output_z(imageSize(conv->img_size_z(),
- conv->filter_size_z(),
- conv->padding_z(),
- conv->stride_z(),
- true));
- config.layerConfig.set_size(conv->output_x() * conv->output_y() *
- conv->output_z() * NUM_FILTERS);
+ conv->set_img_size_y(imageSize(conv->output_y(),
+ conv->filter_size_y(),
+ conv->padding_y(),
+ conv->stride_y(),
+ true));
+ conv->set_img_size_z(imageSize(conv->output_z(),
+ conv->filter_size_z(),
+ conv->padding_z(),
+ conv->stride_z(),
+ true));
+ config.layerConfig.set_size(conv->img_size() * conv->img_size_y() *
+ conv->img_size_z() * NUM_FILTERS);
conv->set_groups(1);
conv->set_filter_channels(conv->channels() / conv->groups());
config.inputDefs.push_back(
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index e1d2270df24331914f3a51acc90a518084b3ce4e..e70802881e3f22160a87b7a4babda07ffbcf9d6f 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -17,6 +17,7 @@ limitations under the License. */
#include
#include "MKLDNNTester.h"
#include "ModelConfig.pb.h"
+#include "paddle/math/MathUtils.h"
using namespace paddle; // NOLINT
@@ -63,6 +64,83 @@ TEST(MKLDNNLayer, FcLayer) {
testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16});
}
+struct testConvDesc {
+ int bs, gp;
+ int ic, ih, iw;
+ int oc, oh, ow;
+ int fh, fw;
+ int ph, pw;
+ int sh, sw;
+ int dh, dw;
+};
+
+void testConvLayer(const testConvDesc& pm) {
+ const std::string compareTypes[] = {"mkldnn_conv", "exconv"};
+ TestConfig cfg;
+ cfg.layerConfig.set_type(compareTypes[0]);
+ cfg.layerConfig.set_num_filters(pm.oc);
+ cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
+ // cfg.layerConfig.set_partial_sum(1); // TODO: check it
+ cfg.layerConfig.set_shared_biases(true);
+ cfg.inputDefs.push_back(
+ {INPUT_DATA,
+ "layer_0",
+ /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
+ /* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)});
+ LayerInputConfig* input = cfg.layerConfig.add_inputs();
+ ConvConfig* conv = input->mutable_conv_conf();
+ conv->set_groups(pm.gp);
+ conv->set_img_size(pm.iw);
+ conv->set_img_size_y(pm.ih);
+ conv->set_output_x(pm.ow);
+ conv->set_output_y(pm.oh);
+ conv->set_filter_size(pm.fw);
+ conv->set_filter_size_y(pm.fh);
+ conv->set_channels(pm.ic);
+ conv->set_padding(pm.pw);
+ conv->set_padding_y(pm.ph);
+ conv->set_stride(pm.sw);
+ conv->set_stride_y(pm.sh);
+ conv->set_dilation(pm.dw);
+ conv->set_dilation_y(pm.dh);
+ conv->set_caffe_mode(true);
+ conv->set_filter_channels(conv->channels() / conv->groups());
+ CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels())
+ << "it is indivisible";
+
+ int fh = (pm.fh - 1) * pm.dh + 1;
+ int fw = (pm.fw - 1) * pm.dw + 1;
+ int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true);
+ int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
+ CHECK_EQ(ow, pm.ow) << "output size check failed";
+ CHECK_EQ(oh, pm.oh) << "output size check failed";
+
+ MKLDNNTester tester;
+ for (auto biasSize : {pm.oc, 0}) {
+ cfg.biasSize = biasSize;
+ TestConfig ref = cfg;
+ ref.layerConfig.set_type(compareTypes[1]);
+ for (auto bs : {pm.bs, 1}) {
+ tester.run(cfg, ref, bs, pm.ih, pm.iw);
+ }
+ }
+}
+
+TEST(MKLDNNLayer, ConvLayer) {
+ /* bs, gp, ic, ih, iw, oc, oh, ow, fh, fw, ph, pw, sh, sw, dh, dw */
+ testConvLayer({2, 1, 3, 32, 32, 16, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({2, 1, 8, 16, 16, 8, 16, 16, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({3, 1, 16, 32, 32, 3, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({8, 1, 16, 18, 18, 32, 18, 18, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({16, 1, 1, 42, 31, 32, 23, 11, 4, 5, 3, 2, 2, 3, 1, 1});
+ testConvLayer({2, 1, 8, 16, 16, 8, 8, 8, 3, 3, 1, 1, 2, 2, 1, 1});
+ testConvLayer({3, 1, 8, 13, 13, 8, 7, 7, 3, 3, 1, 1, 2, 2, 1, 1});
+ // with groups
+ testConvLayer({2, 2, 4, 5, 5, 8, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({2, 3, 3, 5, 5, 3, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1});
+}
+
// TODO(TJ): add branch test
int main(int argc, char** argv) {
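
As a quick sanity check of the size validation in testConvLayer() above: the expected output sizes follow the usual caffe-mode convolution formula applied to the dilated filter extent. The helper below is illustrative (it assumes o = (i + 2p - dilated_f) / s + 1) and reproduces the {16,1, 1,42,31, 32,23,11, 4,5, 3,2, 2,3, 1,1} case:

    #include <cassert>

    // dilated filter extent uses paddle-style dilation, which starts at 1
    int caffeOutputSize(int in, int filter, int pad, int stride, int dilation) {
      int dilatedFilter = (filter - 1) * dilation + 1;
      return (in + 2 * pad - dilatedFilter) / stride + 1;
    }

    int main() {
      assert(caffeOutputSize(42, 4, 3, 2, 1) == 23);  // ih, fh, ph, sh, dh -> oh
      assert(caffeOutputSize(31, 5, 2, 3, 1) == 11);  // iw, fw, pw, sw, dw -> ow
      return 0;
    }
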
diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp
index 0a355e2644cce572ce90ecf5c9d2a5b7b395bc61..0778bb63b7b3bca9b3d2647ca43dad72d783950a 100644
--- a/paddle/math/MKLDNNMatrix.cpp
+++ b/paddle/math/MKLDNNMatrix.cpp
@@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) {
size_t width = cnts / dims[0];
m = Matrix::create(height, width, false, false);
}
-
CHECK(m) << " Matrix should not be empty";
+
CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast<CpuMatrix>(m);
CHECK(cpuMatrix) << "Only support create from CPU matrix yet";
-
- CHECK_EQ(cnts, m->getElementCnt()) << "Count size does not match";
- return std::make_shared(
- m->getData(), m->getHeight(), m->getWidth(), pd);
+ CHECK_EQ(cpuMatrix->getElementCnt(), cnts) << "Count size does not match";
+ return std::make_shared<MKLDNNMatrix>(cpuMatrix, pd);
}
MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
@@ -51,6 +49,27 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
return create(m, memory::primitive_desc(memory::desc(dims, dtype, fmt), eg));
}
+std::shared_ptr<mkldnn::reorder> MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src,
+ const MKLDNNMatrixPtr& dst,
+ bool checkData) {
+ if (src == dst || src->getPrimitiveDesc() == dst->getPrimitiveDesc()) {
+ return nullptr;
+ }
+
+ if (checkData && (src->getData() == dst->getData())) {
+ LOG(FATAL) << "can not create reorder with inplace data";
+ return nullptr;
+ }
+
+ memory::dims srcDims = src->getDims();
+ memory::dims dstDims = dst->getDims();
+ CHECK_EQ(srcDims.size(), dstDims.size());
+ for (size_t i = 0; i < srcDims.size(); ++i) {
+ CHECK_EQ(srcDims[i], dstDims[i]);
+ }
+ return std::make_shared<mkldnn::reorder>(*src, *dst);
+}
+
void MKLDNNMatrix::reorderDataFrom(const MKLDNNMatrixPtr& m,
memory::format srcFmt,
memory::dims targetDim) {
@@ -138,7 +157,7 @@ void MKLDNNMatrix::downSpatial() {
mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
"could not create a memory primitive");
reset(result);
- set_data_handle(getData());
+ set_data_handle(data_);
}
} // namespace paddle
diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h
index e50f698b495713e6f15ab7a12a7ee7487662040f..0aa130b4a0d458ad78d5d1330164af9e73b22a44 100644
--- a/paddle/math/MKLDNNMatrix.h
+++ b/paddle/math/MKLDNNMatrix.h
@@ -30,11 +30,10 @@ typedef std::shared_ptr<MKLDNNMatrix> MKLDNNMatrixPtr;
*/
class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory {
public:
- MKLDNNMatrix(real* data,
- size_t height,
- size_t width,
- mkldnn::memory::primitive_desc pd)
- : CpuMatrix(data, height, width, false), mkldnn::memory(pd, data) {}
+ MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd)
+ : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false),
+ mkldnn::memory(pd, m->getData()),
+ m_(m) {}
~MKLDNNMatrix() {}
@@ -53,6 +52,31 @@ public:
mkldnn::engine& eg,
mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32);
+ /**
+ * Create a memory descriptor.
+ * Defaults to format::any and the f32 data type.
+ */
+ static mkldnn::memory::desc createMemoryDesc(
+ const mkldnn::memory::dims& dims,
+ const mkldnn::memory::format& fmt = mkldnn::memory::format::any,
+ const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) {
+ return mkldnn::memory::desc(dims, dtype, fmt);
+ }
+
+ /**
+ * Create reorder primitive.
+ * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst.
+ * checkData: whether to check that src and dst share the same data handle.
+ * If true, in-place reorders are rejected; otherwise the data handles are
+ * not checked, so the created reorder may operate on an in-place buffer and
+ * correctness is not guaranteed, since not every format conversion
+ * supports in-place operation.
+ */
+ static std::shared_ptr<mkldnn::reorder> createReorder(
+ const MKLDNNMatrixPtr& src,
+ const MKLDNNMatrixPtr& dst,
+ bool checkData = true);
+
public:
/**
* Reorder this MKLDNNMatrix from other format.
@@ -81,11 +105,29 @@ public:
void downSpatial();
/**
- * Update the memory data handle.
+ * Set the memory data handle.
* Caution: This will not check the buffer size of the data,
* it should be covered by the user.
*/
- void updateData(void* data) { set_data_handle(data); }
+ void setData(real* data) {
+ set_data_handle(data);
+ CpuMatrix::setData(data);
+ m_.reset();
+ }
+
+ /**
+ * override Matrix::getData;
+ * check the data handle consistency before returning
+ */
+ real* getData() override {
+ CHECK_EQ((void*)data_, get_data_handle());
+ return data_;
+ }
+
+ const real* getData() const override {
+ CHECK_EQ((void*)data_, get_data_handle());
+ return data_;
+ }
/**
* Get primitive descriptor.
@@ -143,6 +185,10 @@ protected:
memory::format srcFmt,
memory::format dstFmt,
memory::dims dm);
+
+private:
+ // hold the CpuMatrixPtr so the underlying buffer is not released elsewhere
+ CpuMatrixPtr m_;
};
} // namespace paddle
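
A hedged usage sketch for the new createReorder() helper (not part of this patch; it assumes the declarations above plus <vector> and mkldnn.hpp): the returned reorder is pushed onto the same pipeline the layers already submit to the stream, and a null result simply means the two buffers already share a layout.

    #include <vector>
    #include "mkldnn.hpp"
    #include "paddle/math/MKLDNNMatrix.h"

    // Illustrative helper, not from the patch: convert src's format to dst's
    // format only when their primitive descriptors differ.
    void appendReorderIfNeeded(const paddle::MKLDNNMatrixPtr& src,
                               const paddle::MKLDNNMatrixPtr& dst,
                               std::vector<mkldnn::primitive>& pipeline) {
      auto cvt = paddle::MKLDNNMatrix::createReorder(src, dst);
      if (cvt) {
        pipeline.push_back(*cvt);
      }
      // nullptr: src and dst already use the same primitive_desc, nothing to do
    }
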
diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp
index c8ba1074a1555bbddde7e5f0fb2a046138b27c09..c2f17beeb87942ea681f5d388659c0d280157b26 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/math/MathFunctions.cpp
@@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP)
namespace paddle {
+#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <>
void gemm(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB,
@@ -143,6 +144,7 @@ void gemm(const CBLAS_TRANSPOSE transA,
C,
ldc);
}
+#endif
template <>
int getrf(const CBLAS_ORDER order,
@@ -182,6 +184,7 @@ int getri(const CBLAS_ORDER order,
return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv);
}
+#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <>
void axpy(const int n, const float alpha, const float* x, float* y) {
cblas_saxpy(n, alpha, x, 1, y, 1);
@@ -201,6 +204,7 @@ template <>
double dotProduct(const int n, const double* x, const double* y) {
return cblas_ddot(n, x, 1, y, 1);
}
+#endif
#if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML)
diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h
index 637643838ff433753e0cbb9154ee069c2f7c6d15..e8ea6e37ac527a19c529d1731b94bed970211755 100644
--- a/paddle/math/MathFunctions.h
+++ b/paddle/math/MathFunctions.h
@@ -40,7 +40,14 @@ extern "C" {
#ifndef LAPACK_FOUND
extern "C" {
+#ifndef PADDLE_USE_EIGEN_FOR_BLAS
#include <cblas.h>
+#else
+typedef enum CBLAS_ORDER {
+ CblasRowMajor = 101,
+ CblasColMajor = 102
+} CBLAS_ORDER;
+#endif
int LAPACKE_sgetrf(
int matrix_layout, int m, int n, float* a, int lda, int* ipiv);
int LAPACKE_dgetrf(
@@ -56,6 +63,7 @@ int LAPACKE_dgetri(
namespace paddle {
+#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <class T>
void gemm(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB,
@@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA,
const T beta,
T* C,
const int ldc);
+#endif
template <class T>